diff --git a/.gitignore b/.gitignore index 17cb38c8..f9e26445 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,5 @@ files/*.gz files/images stack-screenrc *.pem +accrc +.stackenv diff --git a/AUTHORS b/AUTHORS index cd0acac1..35c0a522 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,6 +1,7 @@ Aaron Lee Aaron Rosen Adam Gandelman +Akihiro MOTOKI Andrew Laski Andy Smith Anthony Young @@ -18,6 +19,7 @@ Gabriel Hurley Gary Kotton Hengqing Hu Hua ZHANG +Isaku Yamahata Jake Dahn James E. Blair Jason Cannavale @@ -34,6 +36,7 @@ Matt Joyce Osamu Habuka Russell Bryant Scott Moser +Sumit Naiksatam Thierry Carrez Todd Willey Tres Henry diff --git a/HACKING.rst b/HACKING.rst index e8f90c78..6ad8c7e6 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -15,6 +15,16 @@ https://github.com/openstack-dev/devstack.git. Besides the master branch that tracks the OpenStack trunk branches a separate branch is maintained for all OpenStack releases starting with Diablo (stable/diablo). +Contributing code to DevStack follows the usual OpenStack process as described +in `How To Contribute`__ in the OpenStack wiki. `DevStack's LaunchPad project`__ +contains the usual links for blueprints, bugs, etc. + +__ contribute_ +.. _contribute: http://wiki.openstack.org/HowToContribute + +__ lp_ +.. _lp: https://launchpad.net/~devstack + The primary script in DevStack is ``stack.sh``, which performs the bulk of the work for DevStack's use cases. There is a subscript ``functions`` that contains generally useful shell functions and is used by a number of the scripts in @@ -53,8 +63,8 @@ configuration of the user environment:: source $TOP_DIR/openrc ``stack.sh`` is a rather large monolithic script that flows through from beginning -to end. The process of breaking it down into project-level sub-scripts has begun -with the introduction of ``lib/cinder`` and ``lib/ceilometer``. +to end. The process of breaking it down into project-level sub-scripts is nearly +complete and should make ``stack.sh`` easier to read and manage. These library sub-scripts have a number of fixed entry points, some of which may just be stubs. These entry points will be called by ``stack.sh`` in the @@ -71,6 +81,36 @@ There is a sub-script template in ``lib/templates`` to be used in creating new service sub-scripts. The comments in ``<>`` are meta comments describing how to use the template and should be removed. +In order to show the dependencies and conditions under which project functions +are executed the top-level conditional testing for things like ``is_service_enabled`` +should be done in ``stack.sh``. There may be nested conditionals that need +to be in the sub-script, such as testing for keystone being enabled in +``configure_swift()``. + + +stackrc +------- + +``stackrc`` is the global configuration file for DevStack. It is responsible for +calling ``localrc`` if it exists so configuration can be overridden by the user. + +The criteria for what belongs in ``stackrc`` can be vaguely summarized as +follows: + +* All project repositories and branches (for historical reasons) +* Global configuration that may be referenced in ``localrc``, i.e. ``DEST``, ``DATA_DIR`` +* Global service configuration like ``ENABLED_SERVICES`` +* Variables used by multiple services that do not have a clear owner, i.e. + ``VOLUME_BACKING_FILE_SIZE`` (nova-volumes and cinder) or ``PUBLIC_NETWORK_NAME`` + (nova-network and quantum) +* Variables that cannot be cleanly declared in a project file due to + dependency ordering, i.e. the order of sourcing the project files cannot + be changed for other reasons but the earlier file needs to dereference a + variable set in the later file. This should be rare.
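For illustration, the two declaration styles at issue look like this in shell (hypothetical variable names, not lines quoted from ``stackrc``)::

    # Project-file style: the ${VAR:-default} form preserves a value the
    # user has already set (e.g. in localrc) instead of overwriting it
    BAR=${BAR:-quux}

    # stackrc style: plain assignment, no default substitution
    FOO=baz

The note below on overriding explains why ``stackrc`` sticks to the plain form.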
+ +Also, variable declarations in ``stackrc`` do NOT allow overriding (the form +``FOO=${FOO:-baz}``); if they did, the value could simply be set in ``localrc`` +and the variable could stay in the project file. Documentation ------------- diff --git a/exercise.sh b/exercise.sh index a0349ce4..5b3c56e2 100755 --- a/exercise.sh +++ b/exercise.sh @@ -28,7 +28,7 @@ skips="" # Loop over each possible script (by basename) for script in $basenames; do - if [[ "$SKIP_EXERCISES" =~ $script ]] ; then + if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then skips="$skips $script" else echo "=====================================================================" diff --git a/exerciserc b/exerciserc index 82c74b7f..c26ec2ce 100644 --- a/exerciserc +++ b/exerciserc @@ -26,3 +26,7 @@ export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30} # Max time to wait for a euca-delete command to propogate export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60} + +# The size of the volume we want to boot from; some storage back-ends +# do not allow a disk resize, so it's important that this can be tuned +export DEFAULT_VOLUME_SIZE=${DEFAULT_VOLUME_SIZE:-1} diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index adc3393b..ae3198f9 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -39,9 +39,8 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# run test as the admin user -_OLD_USERNAME=$OS_USERNAME -OS_USERNAME=admin +# Test as the admin user +. openrc admin admin # Create an aggregate @@ -54,7 +53,7 @@ AGGREGATE_A_ZONE=nova exit_if_aggregate_present() { aggregate_name=$1 - if [ `nova aggregate-list | grep -c " $aggregate_name "` == 0 ]; then + if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then echo "SUCCESS $aggregate_name not present" else echo "ERROR found aggregate: $aggregate_name" @@ -64,8 +63,8 @@ exit_if_aggregate_present() { exit_if_aggregate_present $AGGREGATE_NAME -AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1` -AGGREGATE2_ID=`nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1` +AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1) +AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1) # check aggregate created nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created" @@ -99,8 +98,8 @@ META_DATA_1_KEY=asdf META_DATA_2_KEY=foo META_DATA_3_KEY=bar -#ensure no metadata is set -nova aggregate-details $AGGREGATE_ID | grep {} +#ensure no additional metadata is set +nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" nova aggregate-set-metadata $AGGREGATE_ID ${META_DATA_1_KEY}=123 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY @@ -117,7 +116,7 @@ nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared" nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY -nova aggregate-details $AGGREGATE_ID | grep {} +nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}" # Test aggregate-add/remove-host @@
-125,7 +124,7 @@ nova aggregate-details $AGGREGATE_ID | grep {} if [ "$VIRT_DRIVER" == "xenserver" ]; then echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate" fi -FIRST_HOST=`nova host-list | grep compute | get_field 1 | head -1` +FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1) # Make sure can add two aggregates to same host nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST @@ -142,12 +141,6 @@ nova aggregate-delete $AGGREGATE_ID nova aggregate-delete $AGGREGATE2_ID exit_if_aggregate_present $AGGREGATE_NAME - -# Test complete -# ============= -OS_USERNAME=$_OLD_USERNAME -echo "AGGREGATE TEST PASSED" - set +o xtrace echo "**************************************************" echo "End DevStack Exercise: $0" diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index b06c8ddb..679091bb 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,59 +32,92 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc -# If cinder or n-vol are not enabled we exit with exitcode 55 so that +# If cinder is not enabled we exit with exitcode 55 so that # the exercise is skipped -is_service_enabled cinder n-vol || exit 55 +is_service_enabled cinder || exit 55 + +# Instance type to create +DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} # Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} -# Instance type -DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} +# Security group name +SECGROUP=${SECGROUP:-boot_secgroup} -# Default floating IP pool name -DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} +# Instance and volume names +VM_NAME=${VM_NAME:-ex-bfv-inst} +VOL_NAME=${VOL_NAME:-ex-vol-bfv} -# Default user -DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros} -# Security group name -SECGROUP=${SECGROUP:-boot_secgroup} +# Launching a server +# ================== +# List servers for tenant: +nova list -# Launching servers -# ================= +# Images +# ------ + +# List the images available +glance image-list # Grab the id of the image to launch -IMAGE=`glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1` -die_if_not_set IMAGE "Failure getting image" +IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" + +# Security Groups +# --------------- + +# List security groups +nova secgroup-list + +# Create a secgroup +if ! nova secgroup-list | grep -q $SECGROUP; then + nova secgroup-create $SECGROUP "$SECGROUP description" + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then + echo "Security group not created" + exit 1 + fi +fi -# Instance and volume names -VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance} -VOL_NAME=${VOL_NAME:-test_volume} +# Configure Security Group Rules +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi -# Clean-up from previous runs -nova delete $VOL_INSTANCE_NAME || true +# List secgroup rules +nova secgroup-list-rules $SECGROUP -if ! 
timeout $ACTIVE_TIMEOUT sh -c "while nova show $VOL_INSTANCE_NAME; do sleep 1; done"; then - echo "server didn't terminate!" - exit 1 -fi +# Set up instance +# --------------- -# Configure Security Groups -nova secgroup-delete $SECGROUP || true -nova secgroup-create $SECGROUP "$SECGROUP description" -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +# List flavors +nova flavor-list -# Determinine instance type -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) +fi + +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 fi # Setup Keypair @@ -94,78 +127,80 @@ nova keypair-delete $KEY_NAME || true nova keypair-add $KEY_NAME > $KEY_FILE chmod 600 $KEY_FILE -# Delete the old volume -cinder delete $VOL_NAME || true - -# Free every floating ips - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers -if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then - nova floating-ip-list | grep nova | cut -d "|" -f2 | tr -d " " | xargs -n1 nova floating-ip-delete || true -fi +# Set up volume +# ------------- -# Allocate floating ip -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` - -# Make sure the ip gets allocated -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" +# Delete any old volume +cinder delete $VOL_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" exit 1 fi # Create the bootable volume -cinder create --display_name=$VOL_NAME --image-id $IMAGE 1 - -# Wait for volume to activate +start_time=$(date +%s) +cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ + die "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi +end_time=$(date +%s) +echo "Completed cinder create in $((end_time - start_time)) seconds" + +# Get volume ID +VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1) +die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" -VOLUME_ID=`cinder list | grep $VOL_NAME | get_field 1` +# Boot instance +# ------------- -# Boot instance from volume! This is done with the --block_device_mapping param. -# The format of mapping is: +# Boot using the --block_device_mapping param. 
The format of mapping is: # <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate> # Leaving the middle two fields blank appears to do-the-right-thing -VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | get_field 2` -die_if_not_set VOL_VM_UUID "Failure launching $VOL_INSTANCE_NAME" +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" # Check that the status is active within ACTIVE_TIMEOUT seconds -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then +if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi -# Add floating ip to our server -nova add-floating-ip $VOL_VM_UUID $FLOATING_IP - -# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds -ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) +die_if_not_set IP "Failure retrieving IP address" -# Make sure our volume-backed instance launched -ssh_check "$PUBLIC_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT +# Private IPs can be pinged in single node deployments +ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT -# Remove floating ip from volume-backed instance -nova remove-floating-ip $VOL_VM_UUID $FLOATING_IP +# Clean up +# -------- # Delete volume backed instance -nova delete $VOL_INSTANCE_NAME || \ - die "Failure deleting instance volume $VOL_INSTANCE_NAME" +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" +if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then + echo "Server $VM_NAME not deleted" + exit 1 +fi -# Wait till our volume is no longer in-use +# Wait for volume to be released if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not created" + echo "Volume $VOL_NAME not released" exit 1 fi -# Delete the volume -cinder delete $VOL_NAME || \ - die "Failure deleting volume $VOLUME_NAME" - -# De-allocate the floating ip -nova floating-ip-delete $FLOATING_IP || \ - die "Failure deleting floating IP $FLOATING_IP" +# Delete volume +start_time=$(date +%s) +cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" +if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then + echo "Volume $VOL_NAME not deleted" + exit 1 +fi +end_time=$(date +%s) +echo "Completed cinder delete in $((end_time - start_time)) seconds" -# Delete a secgroup +# Delete secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace
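As an aside, a fully-specified mapping using the colon-delimited fields documented above would look like the following (hypothetical flavor ID and server name):

    # vda is backed by the volume in $VOL_ID; type and size are left blank,
    # and the trailing 0 keeps the volume when the server is deleted
    nova boot --flavor 1 --image $IMAGE \
        --block-device-mapping vda=$VOL_ID:::0 \
        --key_name $KEY_NAME bfv-example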
diff --git a/exercises/bundle.sh b/exercises/bundle.sh index daff5f9c..12f27323 100755 --- a/exercises/bundle.sh +++ b/exercises/bundle.sh @@ -51,7 +51,7 @@ IMAGE=bundle.img truncate -s 5M /tmp/$IMAGE euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE" -euca-upload-bundle -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" +euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET" AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2` die_if_not_set AMI "Failure registering $BUCKET/$IMAGE" diff --git a/exercises/client-args.sh b/exercises/client-args.sh index b3e2ad8d..894da742 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -8,6 +8,14 @@ echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace + # Settings # ======== @@ -63,7 +71,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone $TENANT_ARG $ARGS catalog --service identity; then + if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" @@ -82,7 +90,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then else # Test OSAPI echo -e "\nTest Nova" - if nova $TENANT_ARG $ARGS flavor-list; then + if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then STATUS_NOVA="Succeeded" else STATUS_NOVA="Failed" @@ -91,6 +99,23 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi fi +# Cinder client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + STATUS_CINDER="Skipped" + else + echo -e "\nTest Cinder" + if cinder $TENANT_ARG_DASH $ARGS_DASH list; then + STATUS_CINDER="Succeeded" + else + STATUS_CINDER="Failed" + RETURN=1 + fi + fi +fi + # Glance client # ------------- @@ -116,7 +141,7 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then STATUS_SWIFT="Skipped" else echo -e "\nTest Swift" - if swift $TENANT_ARG $ARGS stat; then + if swift $TENANT_ARG_DASH $ARGS_DASH stat; then STATUS_SWIFT="Succeeded" else STATUS_SWIFT="Failed" @@ -125,6 +150,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi +set +o xtrace + # Results # ------- @@ -137,6 +164,7 @@ function report() { echo -e "\n" report "Keystone" $STATUS_KEYSTONE report "Nova" $STATUS_NOVA +report "Cinder" $STATUS_CINDER report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT
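The next exercise, client-env.sh, is the counterpart to client-args.sh above: the same clients are driven from the OS_* environment instead of explicit arguments. Roughly (hypothetical credential values; the dash-style option spellings are an assumption inferred from the *_DASH variables used above):

    # client-args.sh style: credentials passed explicitly on the command line
    nova --os-username demo --os-password secrete --os-tenant-name demo \
        --os-auth-url http://$SERVICE_HOST:5000/v2.0 flavor-list

    # client-env.sh style: the same call driven by the environment
    export OS_USERNAME=demo OS_PASSWORD=secrete OS_TENANT_NAME=demo
    export OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0
    nova flavor-list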
"*********************************************************************" +# This script exits on an error so that errors don't compound and you see +# only the first error that occured. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following allowing as the install occurs. +set -o xtrace + # Settings # ======== @@ -99,6 +107,23 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then fi fi +# Cinder client +# ------------- + +if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then + STATUS_CINDER="Skipped" + else + echo -e "\nTest Cinder" + if cinder list; then + STATUS_CINDER="Succeeded" + else + STATUS_CINDER="Failed" + RETURN=1 + fi + fi +fi + # Glance client # ------------- @@ -133,6 +158,8 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi +set +o xtrace + # Results # ------- @@ -146,6 +173,7 @@ echo -e "\n" report "Keystone" $STATUS_KEYSTONE report "Nova" $STATUS_NOVA report "EC2" $STATUS_EC2 +report "Cinder" $STATUS_CINDER report "Glance" $STATUS_GLANCE report "Swift" $STATUS_SWIFT diff --git a/exercises/euca.sh b/exercises/euca.sh index b1214930..8b15da8d 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -33,13 +33,18 @@ source $TOP_DIR/functions # Import EC2 configuration source $TOP_DIR/eucarc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMI-format image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name @@ -51,6 +56,7 @@ SECGROUP=${SECGROUP:-euca_secgroup} # Find a machine image to boot IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1` +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Add a secgroup if ! euca-describe-groups | grep -q $SECGROUP; then @@ -73,7 +79,7 @@ fi # Volumes # ------- -if [[ "$ENABLED_SERVICES" =~ "n-vol" || "$ENABLED_SERVICES" =~ "c-vol" ]]; then +if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2` die_if_not_set VOLUME_ZONE "Failure to find zone for volume" @@ -85,7 +91,7 @@ if [[ "$ENABLED_SERVICES" =~ "n-vol" || "$ENABLED_SERVICES" =~ "c-vol" ]]; then die_if_not_set VOLUME "Failure to get volume" # Test volume has become available - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then + if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then echo "volume didnt become available within $RUNNING_TIMEOUT seconds" exit 1 fi @@ -160,13 +166,16 @@ fi euca-terminate-instances $INSTANCE || \ die "Failure terminating instance $INSTANCE" -# Assure it has terminated within a reasonable time -if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then +# Assure it has terminated within a reasonable time. The behaviour of this +# case changed with bug/836978. Requesting the status of an invalid instance +# will now return an error message including the instance id, so we need to +# filter that out. +if ! 
timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then echo "server didn't terminate within $TERMINATE_TIMEOUT seconds" exit 1 fi -# Delete group +# Delete secgroup euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 67878787..34ab69d9 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -2,8 +2,7 @@ # **floating_ips.sh** - using the cloud can be fun -# we will use the ``nova`` cli tool provided by the ``python-novaclient`` -# package to work out the instance connectivity +# Test instance connectivity with the ``nova`` command from ``python-novaclient`` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -31,13 +30,18 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMi image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name @@ -49,6 +53,9 @@ DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova} # Additional floating IP pool and range TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} +# Instance name +VM_NAME="ex-float" + # Launching a server # ================== @@ -59,19 +66,17 @@ nova list # Images # ------ -# Nova has a **deprecated** way of listing images. -nova image-list - -# But we recommend using glance directly +# List the images available glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- -# List of secgroups: +# List security groups nova secgroup-list # Create a secgroup @@ -83,115 +88,122 @@ if ! nova secgroup-list | grep -q $SECGROUP; then fi fi -# Determinine instance type -# ------------------------- +# Configure Security Group Rules +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi -# List of instance types: +# List secgroup rules +nova secgroup-list-rules $SECGROUP + +# Set up instance +# --------------- + +# List flavors nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi -NAME="ex-float" - -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` -die_if_not_set VM_UUID "Failure launching $NAME" - - -# Testing -# ======= +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! 
timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. +# Boot instance +# ------------- -# Waiting for boot -# ---------------- +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" -# check that the status is active within ACTIVE_TIMEOUT seconds +# Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" exit 1 fi -# get the IP of the server -IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) die_if_not_set IP "Failure retrieving IP address" +# Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT -# Security Groups & Floating IPs -# ------------------------------ +# Floating IPs +# ------------ -if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then - # allow icmp traffic (ping) - nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 - if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then - echo "Security group rule not created" - exit 1 - fi -fi +# Allocate a floating IP from the default pool +FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1) +die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL" -# List rules for a secgroup -nova secgroup-list-rules $SECGROUP - -# allocate a floating ip from default pool -FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1` -die_if_not_set FLOATING_IP "Failure creating floating IP" - -# list floating addresses +# List floating addresses if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then echo "Floating IP not allocated" exit 1 fi -# add floating ip to our server +# Add floating IP to our server nova add-floating-ip $VM_UUID $FLOATING_IP || \ - die "Failure adding floating IP $FLOATING_IP to $NAME" + die "Failure adding floating IP $FLOATING_IP to $VM_NAME" -# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds +# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT -# Allocate an IP from second floating pool -TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1` -die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" +if ! is_service_enabled quantum; then + # Allocate an IP from second floating pool + TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1) + die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL" -# list floating addresses -if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! 
nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then - echo "Floating IP not allocated" - exit 1 + # list floating addresses + if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then + echo "Floating IP not allocated" + exit 1 + fi fi -# dis-allow icmp traffic (ping) -nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP" +# Dis-allow icmp traffic (ping) +nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \ + die "Failure deleting security group rule from $SECGROUP" # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then - # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT + # Test we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds + ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail fi -# Delete second floating IP -nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP" - +# Clean up +# -------- -# de-allocate the floating ip -nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP" +if ! is_service_enabled quantum; then + # Delete second floating IP + nova floating-ip-delete $TEST_FLOATING_IP || \ + die "Failure deleting floating IP $TEST_FLOATING_IP" +fi -# Shutdown the server -nova delete $VM_UUID || die "Failure deleting instance $NAME" +# Delete the floating ip +nova floating-ip-delete $FLOATING_IP || \ + die "Failure deleting floating IP $FLOATING_IP" +# Delete instance +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" # Wait for termination if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $NAME not deleted" + echo "Server $VM_NAME not deleted" exit 1 fi -# Delete a secgroup -nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" +# Delete secgroup +nova secgroup-delete $SECGROUP || \ + die "Failure deleting security group $SECGROUP" set +o xtrace echo "*********************************************************************" diff --git a/exercises/horizon.sh b/exercises/horizon.sh new file mode 100755 index 00000000..c5dae3ab --- /dev/null +++ b/exercises/horizon.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# **horizon.sh** + +# Sanity check that horizon started if enabled + +echo "*********************************************************************" +echo "Begin DevStack Exercise: $0" +echo "*********************************************************************" + +# This script exits on an error so that errors don't compound and you see +# only the first error that occurred. +set -o errexit + +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs.
+set -o xtrace + + +# Settings +# ======== + +# Keep track of the current directory +EXERCISE_DIR=$(cd $(dirname "$0") && pwd) +TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) + +# Import common functions +source $TOP_DIR/functions + +# Import configuration +source $TOP_DIR/openrc + +# Import exercise configuration +source $TOP_DIR/exerciserc + +is_service_enabled horizon || exit 55 + +# can we get the front page +curl http://$SERVICE_HOST 2>/dev/null | grep -q '
<h3.*>Log In</h3>' || die "Horizon front page not functioning!" + +set +o xtrace +echo "*********************************************************************" +echo "SUCCESS: End DevStack Exercise: $0" +echo "*********************************************************************" +
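horizon.sh also illustrates the skeleton every exercise in this patch follows; a minimal sketch of those conventions:

    #!/usr/bin/env bash
    set -o errexit        # stop at the first failing command
    set -o xtrace         # echo each command as it runs
    TOP_DIR=$(cd $(dirname "$0")/..; pwd)
    source $TOP_DIR/functions    # common helpers such as die and is_service_enabled
    source $TOP_DIR/openrc       # user credentials
    source $TOP_DIR/exerciserc   # timeouts and sizes
    # Exit code 55 tells exercise.sh to count the script as skipped, not failed
    is_service_enabled horizon || exit 55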
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh index 8f15b634..bc33fe82 100755 --- a/exercises/quantum-adv-test.sh +++ b/exercises/quantum-adv-test.sh @@ -1,10 +1,9 @@ #!/usr/bin/env bash # -# **quantum.sh** +# **quantum-adv-test.sh** -# We will use this test to perform integration testing of nova and -# other components with Quantum. +# Perform integration testing of Nova and other components with Quantum. echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -14,6 +13,7 @@ echo "*********************************************************************" # only the first error that occured. set -o errtrace + trap failed ERR failed() { local r=$? @@ -30,17 +30,8 @@ failed() { # an error. It is also useful for following allowing as the install occurs. set -o xtrace -#------------------------------------------------------------------------------ -# Quantum config check -#------------------------------------------------------------------------------ -# Warn if quantum is not enabled -if [[ ! "$ENABLED_SERVICES" =~ "q-svc" ]]; then - echo "WARNING: Running quantum test without enabling quantum" -fi - -#------------------------------------------------------------------------------ # Environment -#------------------------------------------------------------------------------ +# ----------- # Keep track of the current directory EXERCISE_DIR=$(cd $(dirname "$0") && pwd) @@ -52,16 +43,18 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc -# Import exercise configuration -source $TOP_DIR/exerciserc - # If quantum is not enabled we exit with exitcode 55 which mean # exercise is skipped. is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55 -#------------------------------------------------------------------------------ -# Test settings for quantum -#------------------------------------------------------------------------------ +# Import quantum functions +source $TOP_DIR/lib/quantum + +# Import exercise configuration +source $TOP_DIR/exerciserc + +# Quantum Settings +# ---------------- TENANTS="DEMO1" # TODO (nati)_Test public network @@ -76,14 +69,14 @@ DEMO1_NUM_NET=1 DEMO2_NUM_NET=2 PUBLIC_NET1_CIDR="200.0.0.0/24" -DEMO1_NET1_CIDR="10.1.0.0/24" -DEMO2_NET1_CIDR="10.2.0.0/24" -DEMO2_NET2_CIDR="10.2.1.0/24" +DEMO1_NET1_CIDR="10.10.0.0/24" +DEMO2_NET1_CIDR="10.20.0.0/24" +DEMO2_NET2_CIDR="10.20.1.0/24" PUBLIC_NET1_GATEWAY="200.0.0.1" -DEMO1_NET1_GATEWAY="10.1.0.1" -DEMO2_NET1_GATEWAY="10.2.0.1" -DEMO2_NET2_GATEWAY="10.2.1.1" +DEMO1_NET1_GATEWAY="10.10.0.1" +DEMO2_NET1_GATEWAY="10.20.0.1" +DEMO2_NET2_GATEWAY="10.20.1.1" PUBLIC_NUM_VM=1 DEMO1_NUM_VM=1 @@ -103,24 +96,17 @@ PUBLIC_ROUTER1_NET="admin-net1" DEMO1_ROUTER1_NET="demo1-net1" DEMO2_ROUTER1_NET="demo2-net1" -#------------------------------------------------------------------------------ -# Keystone settings.
-#------------------------------------------------------------------------------ KEYSTONE="keystone" -#------------------------------------------------------------------------------ -# Get a token for clients that don't support service catalog -#------------------------------------------------------------------------------ - -# manually create a token by querying keystone (sending JSON data). Keystone +# Manually create a token by querying keystone (sending JSON data). Keystone # returns a token and catalog of endpoints. We use python to parse the token # and save it. TOKEN=`keystone token-get | grep ' id ' | awk '{print $4}'` -#------------------------------------------------------------------------------ -# Various functions. -#------------------------------------------------------------------------------ +# Various functions +# ----------------- + function foreach_tenant { COMMAND=$1 for TENANT in ${TENANTS//,/ };do @@ -188,11 +174,10 @@ function get_flavor_id { function confirm_server_active { local VM_UUID=$1 - if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova --no_cache show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then - echo "server '$VM_UUID' did not become active!" - false -fi - + if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then + echo "server '$VM_UUID' did not become active!" + false + fi } function add_tenant { @@ -211,27 +196,20 @@ function add_tenant { function remove_tenant { local TENANT=$1 local TENANT_ID=$(get_tenant_id $TENANT) - $KEYSTONE tenant-delete $TENANT_ID } function remove_user { local USER=$1 local USER_ID=$(get_user_id $USER) - $KEYSTONE user-delete $USER_ID } - - -#------------------------------------------------------------------------------ -# "Create" functions -#------------------------------------------------------------------------------ - function create_tenants { source $TOP_DIR/openrc admin admin add_tenant demo1 demo1 demo1 add_tenant demo2 demo2 demo2 + source $TOP_DIR/openrc demo demo } function delete_tenants_and_users { @@ -241,6 +219,7 @@ function delete_tenants_and_users { remove_user demo2 remove_tenant demo2 echo "removed all tenants" + source $TOP_DIR/openrc demo demo } function create_network { @@ -256,12 +235,8 @@ function create_network { source $TOP_DIR/openrc $TENANT $TENANT local NET_ID=$(quantum net-create --tenant_id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) quantum subnet-create --ip_version 4 --tenant_id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR - #T0DO(nati) comment out until l3-agent is merged - #local ROUTER_ID=$($QUANTUM router-create --tenant_id $TENANT_ID $ROUTER_NAME| grep ' id ' | awk '{print $4}' ) - #for NET_NAME in ${NET_NAMES//,/ };do - # SUBNET_ID=`get_subnet_id $NET_NAME` - # $QUANTUM router-interface-create $NAME --subnet_id $SUBNET_ID - #done + quantum-debug probe-create $NET_ID + source $TOP_DIR/openrc demo demo } function create_networks { @@ -285,7 +260,7 @@ function create_vm { done #TODO (nati) Add multi-nic test #TODO (nati) Add public-net test - local VM_UUID=`nova --no_cache boot --flavor $(get_flavor_id m1.tiny) \ + local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ --image $(get_image_id) \ $NIC \ $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` @@ -301,32 +276,26 @@ function ping_ip { # Test agent connection. 
Assumes namespaces are disabled, and # that DHCP is in use, but not L3 local VM_NAME=$1 - IP=`nova --no_cache show $VM_NAME | grep 'network' | awk '{print $5}'` - if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then - echo "Could not ping $VM_NAME" - false - fi + local NET_NAME=$2 + IP=`nova show $VM_NAME | grep 'network' | awk '{print $5}'` + ping_check $NET_NAME $IP $BOOT_TIMEOUT } function check_vm { local TENANT=$1 local NUM=$2 local VM_NAME="$TENANT-server$NUM" + local NET_NAME=$3 source $TOP_DIR/openrc $TENANT $TENANT - ping_ip $VM_NAME + ping_ip $VM_NAME $NET_NAME # TODO (nati) test ssh connection # TODO (nati) test inter connection between vm - # TODO (nati) test namespace dhcp # TODO (nati) test dhcp host routes # TODO (nati) test multi-nic - # TODO (nati) use test-agent - # TODO (nati) test L3 forwarding - # TODO (nati) test floating ip - # TODO (nati) test security group } function check_vms { - foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM%' + foreach_tenant_vm 'check_vm ${%TENANT%_NAME} %NUM% ${%TENANT%_VM%NUM%_NET}' } function shutdown_vm { @@ -334,12 +303,12 @@ function shutdown_vm { local NUM=$2 source $TOP_DIR/openrc $TENANT $TENANT VM_NAME=${TENANT}-server$NUM - nova --no_cache delete $VM_NAME + nova delete $VM_NAME } function shutdown_vms { foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%' - if ! timeout $TERMINATE_TIMEOUT sh -c "while nova --no_cache list | grep -q ACTIVE; do sleep 1; done"; then + if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then echo "Some VMs failed to shutdown" false fi @@ -347,17 +316,22 @@ function shutdown_vms { function delete_network { local TENANT=$1 + local NUM=$2 + local NET_NAME="${TENANT}-net$NUM" source $TOP_DIR/openrc admin admin local TENANT_ID=$(get_tenant_id $TENANT) #TODO(nati) comment out until l3-agent merged #for res in port subnet net router;do - for res in port subnet net;do - quantum ${res}-list -F id -F tenant_id | grep $TENANT_ID | awk '{print $2}' | xargs -I % quantum ${res}-delete % + for net_id in `quantum net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do + delete_probe $net_id + quantum subnet-list | grep $net_id | awk '{print $2}' | xargs -I% quantum subnet-delete % + quantum net-delete $net_id done + source $TOP_DIR/openrc demo demo } function delete_networks { - foreach_tenant 'delete_network ${%TENANT%_NAME}' + foreach_tenant_net 'delete_network ${%TENANT%_NAME} ${%NUM%}' #TODO(nati) add secuirty group check after it is implemented # source $TOP_DIR/openrc demo1 demo1 # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0 @@ -383,9 +357,9 @@ function all { delete_all } -#------------------------------------------------------------------------------ -# Test functions. -#------------------------------------------------------------------------------ +# Test functions +# -------------- + function test_functions { IMAGE=$(get_image_id) echo $IMAGE @@ -400,9 +374,9 @@ function test_functions { echo $NETWORK_ID } -#------------------------------------------------------------------------------ -# Usage and main. -#------------------------------------------------------------------------------ +# Usage and main +# -------------- + usage() { echo "$0: [-h]" echo " -h, --help Display help message" @@ -473,10 +447,9 @@ main() { fi } +# Kick off script +# --------------- -#------------------------------------------------------------------------------- -# Kick off script. 
-#------------------------------------------------------------------------------- echo $* main $* diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh index f6810e3e..a33c9c63 100755 --- a/exercises/sec_groups.sh +++ b/exercises/sec_groups.sh @@ -2,7 +2,7 @@ # **sec_groups.sh** -# Test security groups via the command line tools that ship with it. +# Test security groups via the command line echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -41,14 +41,14 @@ source $TOP_DIR/exerciserc nova secgroup-list # Create random name for new sec group and create secgroup of said name -SEC_GROUP_NAME="sec-group-$(openssl rand -hex 4)" +SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)" nova secgroup-create $SEC_GROUP_NAME 'a test security group' # Add some rules to the secgroup RULES_TO_ADD=( 22 3389 5900 ) for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 + nova secgroup-add-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done # Check to make sure rules were added @@ -63,10 +63,12 @@ done # Delete rules and secgroup for RULE in "${RULES_TO_ADD[@]}"; do - nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/00 + nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0 done -nova secgroup-delete $SEC_GROUP_NAME +# Delete secgroup +nova secgroup-delete $SEC_GROUP_NAME || \ + die "Failure deleting security group $SEC_GROUP_NAME" set +o xtrace echo "*********************************************************************" diff --git a/exercises/swift.sh b/exercises/swift.sh index 4cd487bc..a75f955a 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -2,7 +2,7 @@ # **swift.sh** -# Test swift via the command line tools that ship with it. +# Test swift via the ``swift`` command line from ``python-swiftclient` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -33,13 +33,13 @@ source $TOP_DIR/openrc # Import exercise configuration source $TOP_DIR/exerciserc -# Container name -CONTAINER=ex-swift - # If swift is not enabled we exit with exitcode 55 which mean # exercise is skipped. is_service_enabled swift || exit 55 +# Container name +CONTAINER=ex-swift + # Testing Swift # ============= diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 72c8729e..45cb0c8e 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -2,14 +2,14 @@ # **volumes.sh** -# Test cinder volumes with the cinder command from python-cinderclient +# Test cinder volumes with the ``cinder`` command from ``python-cinderclient`` echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see -# only the first error that occured. +# only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers @@ -30,22 +30,31 @@ source $TOP_DIR/functions # Import configuration source $TOP_DIR/openrc +# Import quantum functions if needed +if is_service_enabled quantum; then + source $TOP_DIR/lib/quantum +fi + # Import exercise configuration source $TOP_DIR/exerciserc -# If cinder or n-vol are not enabled we exit with exitcode 55 which mean +# If cinder is not enabled we exit with exitcode 55 which mean # exercise is skipped. 
-is_service_enabled cinder n-vol || exit 55 +is_service_enabled cinder || exit 55 # Instance type to create DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny} -# Boot this image, use first AMi image if unset +# Boot this image, use first AMI image if unset DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami} # Security group name SECGROUP=${SECGROUP:-vol_secgroup} +# Instance and volume names +VM_NAME=${VM_NAME:-ex-vol-inst} +VOL_NAME="ex-vol-$(openssl rand -hex 4)" + # Launching a server # ================== @@ -56,19 +65,17 @@ nova list # Images # ------ -# Nova has a **deprecated** way of listing images. -nova image-list - -# But we recommend using glance directly +# List the images available glance image-list # Grab the id of the image to launch IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) +die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME" # Security Groups # --------------- -# List of secgroups: +# List security groups nova secgroup-list # Create a secgroup @@ -81,129 +88,129 @@ if ! nova secgroup-list | grep -q $SECGROUP; then fi # Configure Security Group Rules -nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 -nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then + nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 +fi +if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then + nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 +fi + +# List secgroup rules +nova secgroup-list-rules $SECGROUP -# determinine instance type -# ------------------------- +# Set up instance +# --------------- -# List of instance types: +# List flavors nova flavor-list -INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1` +# Select a flavor +INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1) if [[ -z "$INSTANCE_TYPE" ]]; then # grab the first flavor in the list to launch if default doesn't exist - INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1` + INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1) fi -NAME="ex-vol" - -VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2` -die_if_not_set VM_UUID "Failure launching $NAME" - - -# Testing -# ======= +# Clean-up from previous runs +nova delete $VM_NAME || true +if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then + echo "server didn't terminate!" + exit 1 +fi -# First check if it spins up (becomes active and responds to ping on -# internal ip). If you run this script from a nova node, you should -# bypass security groups and have direct access to the server. +# Boot instance +# ------------- -# Waiting for boot -# ---------------- +VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2) +die_if_not_set VM_UUID "Failure launching $VM_NAME" -# check that the status is active within ACTIVE_TIMEOUT seconds +# Check that the status is active within ACTIVE_TIMEOUT seconds if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then echo "server didn't become active!" 
exit 1 fi -# get the IP of the server -IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2` +# Get the instance IP +IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2) die_if_not_set IP "Failure retrieving IP address" -# for single node deployments, we can ping private ips +# Private IPs can be pinged in single node deployments ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT # Volumes # ------- -VOL_NAME="myvol-$(openssl rand -hex 4)" - # Verify it doesn't exist -if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then +if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then echo "Volume $VOL_NAME already exists" exit 1 fi # Create a new volume -cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1 -if [[ $? != 0 ]]; then - echo "Failure creating volume $VOL_NAME" - exit 1 -fi - -start_time=`date +%s` +start_time=$(date +%s) +cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \ + die "Failure creating volume $VOL_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then echo "Volume $VOL_NAME not created" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed cinder create in $((end_time - start_time)) seconds" # Get volume ID -VOL_ID=`cinder list | grep $VOL_NAME | head -1 | get_field 1` +VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1) die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME" # Attach to server DEVICE=/dev/vdb -start_time=`date +%s` +start_time=$(date +%s) nova volume-attach $VM_UUID $VOL_ID $DEVICE || \ - die "Failure attaching volume $VOL_NAME to $NAME" + die "Failure attaching volume $VOL_NAME to $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then - echo "Volume $VOL_NAME not attached to $NAME" + echo "Volume $VOL_NAME not attached to $VM_NAME" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed volume-attach in $((end_time - start_time)) seconds" -VOL_ATTACH=`cinder list | grep $VOL_NAME | head -1 | get_field -1` +VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1) die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status" if [[ "$VOL_ATTACH" != $VM_UUID ]]; then echo "Volume not attached to correct instance" exit 1 fi +# Clean up +# -------- + # Detach volume -start_time=`date +%s` -nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME" +start_time=$(date +%s) +nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME" if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then - echo "Volume $VOL_NAME not detached from $NAME" + echo "Volume $VOL_NAME not detached from $VM_NAME" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed volume-detach in $((end_time - start_time)) seconds" # Delete volume -start_time=`date +%s` +start_time=$(date +%s) cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME" -if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME; do sleep 1; done"; then +if ! 
timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then echo "Volume $VOL_NAME not deleted" exit 1 fi -end_time=`date +%s` +end_time=$(date +%s) echo "Completed cinder delete in $((end_time - start_time)) seconds" -# Shutdown the server -nova delete $VM_UUID || die "Failure deleting instance $NAME" - -# Wait for termination +# Delete instance +nova delete $VM_UUID || die "Failure deleting instance $VM_NAME" if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then - echo "Server $NAME not deleted" + echo "Server $VM_NAME not deleted" exit 1 fi -# Delete a secgroup +# Delete secgroup nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP" set +o xtrace diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh new file mode 100644 index 00000000..f1599557 --- /dev/null +++ b/extras.d/80-tempest.sh @@ -0,0 +1,21 @@ +# tempest.sh - DevStack extras script + +source $TOP_DIR/lib/tempest + +if [[ "$1" == "stack" ]]; then + # Configure Tempest last to ensure that the runtime configuration of + # the various OpenStack services can be queried. + if is_service_enabled tempest; then + echo_summary "Configuring Tempest" + install_tempest + configure_tempest + init_tempest + fi +fi + +if [[ "$1" == "unstack" ]]; then + # no-op + : +fi + + diff --git a/extras.d/README b/extras.d/README new file mode 100644 index 00000000..ffc6793a --- /dev/null +++ b/extras.d/README @@ -0,0 +1,14 @@ +The extras.d directory contains project initialization scripts to be +sourced by stack.sh at the end of its run. This is expected to be +used by external projects that want to be configured, started and +stopped with DevStack. + +Order is controlled by prefixing the script names with a two-digit +sequence number. Script names must end with '.sh'. This provides a +convenient way to disable scripts by simply renaming them. + +DevStack reserves the sequence numbers 00 through 09 and 90 through 99 +for its own use. + +The scripts are called with an argument of 'stack' by stack.sh and +with an argument of 'unstack' by unstack.sh. diff --git a/files/apts/baremetal b/files/apts/baremetal new file mode 100644 index 00000000..54e76e00 --- /dev/null +++ b/files/apts/baremetal @@ -0,0 +1,9 @@ +busybox +dnsmasq +gcc +ipmitool +make +open-iscsi +qemu-kvm +syslinux +tgt diff --git a/files/apts/general b/files/apts/general index 12a92e0c..0264066a 100644 --- a/files/apts/general +++ b/files/apts/general @@ -6,7 +6,7 @@ screen unzip wget psmisc -git-core +git lsof # useful when debugging openssh-server vim-nox diff --git a/files/apts/horizon b/files/apts/horizon index 2161ccd3..2c2faf1a 100644 --- a/files/apts/horizon +++ b/files/apts/horizon @@ -21,4 +21,5 @@ python-coverage python-cherrypy3 # why?
python-migrate nodejs +nodejs-legacy # dist:quantal python-netaddr diff --git a/files/apts/ldap b/files/apts/ldap new file mode 100644 index 00000000..81a00f27 --- /dev/null +++ b/files/apts/ldap @@ -0,0 +1,3 @@ +ldap-utils +slapd # NOPRIME +python-ldap diff --git a/files/apts/n-cpu b/files/apts/n-cpu index a40b6590..ad2d6d71 100644 --- a/files/apts/n-cpu +++ b/files/apts/n-cpu @@ -3,3 +3,5 @@ lvm2 open-iscsi open-iscsi-utils genisoimage +sysfsutils +sg3-utils diff --git a/files/apts/nova b/files/apts/nova index c16a7087..39b4060e 100644 --- a/files/apts/nova +++ b/files/apts/nova @@ -16,6 +16,7 @@ sqlite3 sudo kvm libvirt-bin # NOPRIME +libjs-jquery-tablesorter # Needed for coverage html reports vlan curl rabbitmq-server # NOPRIME @@ -30,6 +31,7 @@ python-libvirt python-libxml2 python-routes python-netaddr +python-numpy # used by websockify for spice console python-pastedeploy python-eventlet python-cheetah diff --git a/files/apts/ryu b/files/apts/ryu index 1e8f2d2b..4a4fc523 100644 --- a/files/apts/ryu +++ b/files/apts/ryu @@ -1,4 +1,5 @@ python-setuptools python-gevent python-gflags +python-netifaces python-sphinx diff --git a/files/apts/swift b/files/apts/swift index f2983778..c52c68b7 100644 --- a/files/apts/swift +++ b/files/apts/swift @@ -1,6 +1,6 @@ curl gcc -memcached # NOPRIME +memcached python-configobj python-coverage python-dev diff --git a/files/apts/tls-proxy b/files/apts/tls-proxy new file mode 100644 index 00000000..0a440159 --- /dev/null +++ b/files/apts/tls-proxy @@ -0,0 +1 @@ +stud # only available in dist:precise,quantal diff --git a/files/keystone_data.sh b/files/keystone_data.sh index 3da11bf0..4c76c9b5 100755 --- a/files/keystone_data.sh +++ b/files/keystone_data.sh @@ -4,17 +4,10 @@ # # Tenant User Roles # ------------------------------------------------------------------ -# admin admin admin # service glance admin -# service nova admin, [ResellerAdmin (swift only)] -# service quantum admin # if enabled # service swift admin # if enabled -# service cinder admin # if enabled # service heat admin # if enabled # service ceilometer admin # if enabled -# demo admin admin -# demo demo Member, anotherrole -# invisible_to_admin demo Member # Tempest Only: # alt_demo alt_demo Member # @@ -40,122 +33,35 @@ function get_id () { echo `"$@" | awk '/ id / { print $4 }'` } - -# Tenants -# ------- - -ADMIN_TENANT=$(get_id keystone tenant-create --name=admin) -SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME) -DEMO_TENANT=$(get_id keystone tenant-create --name=demo) -INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin) - - -# Users -# ----- - -ADMIN_USER=$(get_id keystone user-create --name=admin \ - --pass="$ADMIN_PASSWORD" \ - --email=admin@example.com) -DEMO_USER=$(get_id keystone user-create --name=demo \ - --pass="$ADMIN_PASSWORD" \ - --email=demo@example.com) +# Lookups +SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") +ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") +MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }") # Roles # ----- -ADMIN_ROLE=$(get_id keystone role-create --name=admin) -KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin) -KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin) -# ANOTHER_ROLE demonstrates that an arbitrary role may be created and used -# TODO(sleepsonthefloor): show how this can be used for rbac in the future! 
-ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole) - - -# Add Roles to Users in Tenants -keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT - -# TODO(termie): these two might be dubious -keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT -keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT - - -# The Member role is used by Horizon and Swift so we need to keep it: -MEMBER_ROLE=$(get_id keystone role-create --name=Member) -keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT -keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT +# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it. +# The admin role in swift allows a user to act as an admin for their tenant, +# but ResellerAdmin is needed for a user to act as any tenant. The name of this +# role is also configurable in swift-proxy.conf +RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) # Services # -------- -# Keystone -if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(get_id keystone service-create \ - --name=keystone \ - --type=identity \ - --description="Keystone Identity Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $KEYSTONE_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \ - --adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \ - --internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" -fi - -# Nova -if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then - NOVA_USER=$(get_id keystone user-create \ - --name=nova \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=nova@example.com) - keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $NOVA_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - NOVA_SERVICE=$(get_id keystone service-create \ - --name=nova \ - --type=compute \ - --description="Nova Compute Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $NOVA_SERVICE \ - --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s" - fi +if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then + NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }") # Nova needs ResellerAdmin role to download images when accessing - # swift through the s3 api. The admin role in swift allows a user - # to act as an admin for their tenant, but ResellerAdmin is needed - # for a user to act as any tenant. The name of this role is also - # configurable in swift-proxy.conf - RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin) + # swift through the s3 api. 
keystone user-role-add \ --tenant_id $SERVICE_TENANT \ --user_id $NOVA_USER \ --role_id $RESELLER_ROLE fi -# Volume -if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - VOLUME_SERVICE=$(get_id keystone service-create \ - --name=volume \ - --type=volume \ - --description="Volume Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $VOLUME_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" - fi -fi - # Heat if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then HEAT_USER=$(get_id keystone user-create --name=heat \ @@ -165,6 +71,8 @@ if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $HEAT_USER \ --role_id $ADMIN_ROLE + # heat_stack_user role is for users created by Heat + keystone role-create --name heat_stack_user if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then HEAT_CFN_SERVICE=$(get_id keystone service-create \ --name=heat-cfn \ @@ -239,30 +147,6 @@ if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then fi fi -if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - QUANTUM_USER=$(get_id keystone user-create \ - --name=quantum \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=quantum@example.com) - keystone user-role-add \ - --tenant_id $SERVICE_TENANT \ - --user_id $QUANTUM_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - QUANTUM_SERVICE=$(get_id keystone service-create \ - --name=quantum \ - --type=network \ - --description="Quantum Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $QUANTUM_SERVICE \ - --publicurl "http://$SERVICE_HOST:9696/" \ - --adminurl "http://$SERVICE_HOST:9696/" \ - --internalurl "http://$SERVICE_HOST:9696/" - fi -fi - if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \ --pass="$SERVICE_PASSWORD" \ @@ -271,6 +155,10 @@ if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then keystone user-role-add --tenant_id $SERVICE_TENANT \ --user_id $CEILOMETER_USER \ --role_id $ADMIN_ROLE + # Ceilometer needs ResellerAdmin role to access swift account stats. 
+ keystone user-role-add --tenant_id $SERVICE_TENANT \ + --user_id $CEILOMETER_USER \ + --role_id $RESELLER_ROLE if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then CEILOMETER_SERVICE=$(get_id keystone service-create \ --name=ceilometer \ @@ -331,25 +219,3 @@ if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then --user_id $ALT_DEMO_USER \ --role_id $MEMBER_ROLE fi - -if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then - CINDER_USER=$(get_id keystone user-create --name=cinder \ - --pass="$SERVICE_PASSWORD" \ - --tenant_id $SERVICE_TENANT \ - --email=cinder@example.com) - keystone user-role-add --tenant_id $SERVICE_TENANT \ - --user_id $CINDER_USER \ - --role_id $ADMIN_ROLE - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - CINDER_SERVICE=$(get_id keystone service-create \ - --name=cinder \ - --type=volume \ - --description="Cinder Service") - keystone endpoint-create \ - --region RegionOne \ - --service_id $CINDER_SERVICE \ - --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \ - --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" - fi -fi diff --git a/files/ldap/manager.ldif.in b/files/ldap/manager.ldif.in new file mode 100644 index 00000000..e522150f --- /dev/null +++ b/files/ldap/manager.ldif.in @@ -0,0 +1,10 @@ +dn: olcDatabase={${LDAP_OLCDB_NUMBER}}hdb,cn=config +changetype: modify +replace: olcSuffix +olcSuffix: dc=openstack,dc=org +- +replace: olcRootDN +olcRootDN: dc=Manager,dc=openstack,dc=org +- +${LDAP_ROOTPW_COMMAND}: olcRootPW +olcRootPW: ${SLAPPASS} diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif new file mode 100644 index 00000000..287fda45 --- /dev/null +++ b/files/ldap/openstack.ldif @@ -0,0 +1,21 @@ +dn: dc=openstack,dc=org +dc: openstack +objectClass: dcObject +objectClass: organizationalUnit +ou: openstack + +dn: ou=Groups,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Groups + +dn: ou=Users,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Users + +dn: ou=Roles,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Roles + +dn: ou=Projects,dc=openstack,dc=org +objectClass: organizationalUnit +ou: Projects diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector new file mode 100644 index 00000000..c76454fd --- /dev/null +++ b/files/rpms-suse/ceilometer-collector @@ -0,0 +1,4 @@ +# Not available in openSUSE main repositories, but can be fetched from OBS +# (devel:languages:python and server:database projects) +mongodb +python-pymongo diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/cinder @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/general b/files/rpms-suse/general new file mode 100644 index 00000000..8ed74ec0 --- /dev/null +++ b/files/rpms-suse/general @@ -0,0 +1,23 @@ +bridge-utils +curl +euca2ools +git-core +iputils +openssh +psmisc +python-cmd2 # dist:opensuse-12.3 +python-netaddr +python-pep8 +python-pip +python-pylint +python-unittest2 +python-virtualenv +screen +tar +tcpdump +unzip +vim-enhanced +wget + +findutils-locate # useful when debugging +lsof # useful when debugging diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance new file mode 100644 index 00000000..dd68ac08 --- /dev/null +++ b/files/rpms-suse/glance @@ -0,0 +1,12 @@ +gcc +libxml2-devel +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-argparse +python-devel +python-eventlet +python-greenlet +python-iso8601 +python-wsgiref +python-xattr 
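The new ``files/ldap/manager.ldif.in`` above is a template: ``${LDAP_OLCDB_NUMBER}``, ``${LDAP_ROOTPW_COMMAND}`` and ``${SLAPPASS}`` must be substituted before the LDIF can be applied. A minimal rendering sketch, assuming sed-based substitution and sample values; the consuming code is not shown in this diff, so the mechanism here is illustrative only:

    # Render the template with sample values, then apply it against
    # the local cn=config database as the LDAP root.
    LDAP_OLCDB_NUMBER=1                  # index of the hdb database in cn=config
    SLAPPASS=$(slappasswd -s secret)     # hashed rootpw for olcRootPW
    LDAP_ROOTPW_COMMAND=replace          # 'add' if olcRootPW is not set yet
    sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" \
        -e "s|\${SLAPPASS}|$SLAPPASS|" \
        -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" \
        files/ldap/manager.ldif.in | sudo ldapmodify -Y EXTERNAL -H ldapi:///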
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon new file mode 100644 index 00000000..7e46ffe0 --- /dev/null +++ b/files/rpms-suse/horizon @@ -0,0 +1,23 @@ +apache2 # NOPRIME +apache2-mod_wsgi # NOPRIME +nodejs +python-CherryPy # why? (coming from apts) +python-Paste +python-PasteDeploy +python-Routes +python-Sphinx +python-SQLAlchemy +python-WebOb +python-anyjson +python-beautifulsoup +python-coverage +python-dateutil +python-eventlet +python-kombu +python-mox +python-netaddr +python-nose +python-pep8 +python-pylint +python-sqlalchemy-migrate +python-xattr diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone new file mode 100644 index 00000000..b3c876ad --- /dev/null +++ b/files/rpms-suse/keystone @@ -0,0 +1,17 @@ +cyrus-sasl-devel +openldap2-devel +python-Paste +python-PasteDeploy +python-PasteScript +python-Routes +python-SQLAlchemy +python-WebOb +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-greenlet +python-lxml +python-mysql +python-py-bcrypt +python-pysqlite +sqlite3 diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api new file mode 100644 index 00000000..ad943ffd --- /dev/null +++ b/files/rpms-suse/n-api @@ -0,0 +1,2 @@ +gcc # temporary because this pulls in glance to get the client without running the glance prereqs +python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu new file mode 100644 index 00000000..7040b843 --- /dev/null +++ b/files/rpms-suse/n-cpu @@ -0,0 +1,6 @@ +# Stuff for diablo volumes +genisoimage +lvm2 +open-iscsi +sysfsutils +sg3_utils diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc new file mode 100644 index 00000000..c8722b9f --- /dev/null +++ b/files/rpms-suse/n-novnc @@ -0,0 +1 @@ +python-numpy diff --git a/files/rpms-suse/n-vol b/files/rpms-suse/n-vol new file mode 100644 index 00000000..e5b47274 --- /dev/null +++ b/files/rpms-suse/n-vol @@ -0,0 +1,2 @@ +lvm2 +tgt diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova new file mode 100644 index 00000000..03067162 --- /dev/null +++ b/files/rpms-suse/nova @@ -0,0 +1,50 @@ +curl +# Note: we need to package dhcp_release in dnsmasq! +dnsmasq +ebtables +gawk +iptables +iputils +kpartx +kvm +# qemu as fallback if kvm cannot be used +qemu +libvirt # NOPRIME +libvirt-python +libxml2-python +mysql-community-server # NOPRIME +parted +python-M2Crypto +python-m2crypto # dist:sle11sp2 +python-Paste +python-PasteDeploy +python-Routes +python-SQLAlchemy +python-Tempita +python-boto +python-carrot +python-cheetah +python-eventlet +python-feedparser +python-greenlet +python-iso8601 +python-kombu +python-lockfile +python-lxml # needed for glance which is needed for nova --- this shouldn't be here +python-mox +python-mysql +python-netaddr +python-paramiko +python-python-gflags +python-sqlalchemy-migrate +python-suds +python-xattr # needed for glance which is needed for nova --- this shouldn't be here +rabbitmq-server # NOPRIME +socat +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql new file mode 100644 index 00000000..bf19d397 --- /dev/null +++ b/files/rpms-suse/postgresql @@ -0,0 +1 @@ +python-psycopg2 diff --git a/files/rpms-suse/quantum b/files/rpms-suse/quantum new file mode 100644 index 00000000..068c15c2 --- /dev/null +++ b/files/rpms-suse/quantum @@ -0,0 +1,27 @@ +# Note: we need to package dhcp_release in dnsmasq! 
+dnsmasq +ebtables +iptables +iputils +mysql-community-server # NOPRIME +python-boto +python-eventlet +python-greenlet +python-iso8601 +python-kombu +python-mysql +python-netaddr +python-Paste +python-PasteDeploy +python-pyudev +python-Routes +python-SQLAlchemy +python-suds +rabbitmq-server # NOPRIME +sqlite3 +sudo +vlan + +# FIXME: qpid is not part of openSUSE, those names are tentative +python-qpid # NOPRIME +qpidd # NOPRIME diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu new file mode 100644 index 00000000..763fd24c --- /dev/null +++ b/files/rpms-suse/ryu @@ -0,0 +1,5 @@ +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-Sphinx +python-gevent +python-python-gflags diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift new file mode 100644 index 00000000..db379bbc --- /dev/null +++ b/files/rpms-suse/swift @@ -0,0 +1,19 @@ +curl +gcc +memcached +python-PasteDeploy +python-WebOb +python-configobj +python-coverage +python-devel +python-distribute +python-setuptools # instead of python-distribute; dist:sle11sp2 +python-eventlet +python-greenlet +python-netifaces +python-nose +python-simplejson +python-xattr +sqlite3 +xfsprogs +xinetd diff --git a/files/rpms/ldap b/files/rpms/ldap new file mode 100644 index 00000000..2f7ab5de --- /dev/null +++ b/files/rpms/ldap @@ -0,0 +1,3 @@ +openldap-servers +openldap-clients +python-ldap diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index f7054e82..149672ac 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -2,3 +2,5 @@ iscsi-initiator-utils lvm2 genisoimage +sysfsutils +sg3_utils diff --git a/files/rpms/n-spice b/files/rpms/n-spice new file mode 100644 index 00000000..24ce15ab --- /dev/null +++ b/files/rpms/n-spice @@ -0,0 +1 @@ +numpy diff --git a/files/rpms/nova b/files/rpms/nova index 88ad8c31..568ee7f5 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -10,6 +10,7 @@ kvm libvirt-bin # NOPRIME libvirt-python libxml2-python +numpy # needed by websockify for spice console m2crypto mysql-server # NOPRIME parted diff --git a/files/rpms/ryu b/files/rpms/ryu index 1e8f2d2b..4a4fc523 100644 --- a/files/rpms/ryu +++ b/files/rpms/ryu @@ -1,4 +1,5 @@ python-setuptools python-gevent python-gflags +python-netifaces python-sphinx diff --git a/files/rpms/swift b/files/rpms/swift index c9d49e92..ce41ceb8 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,6 +1,6 @@ curl gcc -memcached # NOPRIME +memcached python-configobj python-coverage python-devel diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf index 4e0dcbf9..c670531b 100644 --- a/files/swift/rsyncd.conf +++ b/files/swift/rsyncd.conf @@ -1,79 +1,79 @@ uid = %USER% gid = %GROUP% -log file = /var/log/rsyncd.log -pid file = /var/run/rsyncd.pid +log file = %SWIFT_DATA_DIR%/logs/rsyncd.log +pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid address = 127.0.0.1 [account6012] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/account6012.lock +lock file = %SWIFT_DATA_DIR%/run/account6012.lock [account6022] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/account6022.lock +lock file = %SWIFT_DATA_DIR%/run/account6022.lock [account6032] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/account6032.lock +lock file = %SWIFT_DATA_DIR%/run/account6032.lock [account6042] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/account6042.lock +lock file = 
%SWIFT_DATA_DIR%/run/account6042.lock [container6011] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/container6011.lock +lock file = %SWIFT_DATA_DIR%/run/container6011.lock [container6021] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/container6021.lock +lock file = %SWIFT_DATA_DIR%/run/container6021.lock [container6031] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/container6031.lock +lock file = %SWIFT_DATA_DIR%/run/container6031.lock [container6041] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/container6041.lock +lock file = %SWIFT_DATA_DIR%/run/container6041.lock [object6010] max connections = 25 path = %SWIFT_DATA_DIR%/1/node/ read only = false -lock file = /var/lock/object6010.lock +lock file = %SWIFT_DATA_DIR%/run/object6010.lock [object6020] max connections = 25 path = %SWIFT_DATA_DIR%/2/node/ read only = false -lock file = /var/lock/object6020.lock +lock file = %SWIFT_DATA_DIR%/run/object6020.lock [object6030] max connections = 25 path = %SWIFT_DATA_DIR%/3/node/ read only = false -lock file = /var/lock/object6030.lock +lock file = %SWIFT_DATA_DIR%/run/object6030.lock [object6040] max connections = 25 path = %SWIFT_DATA_DIR%/4/node/ read only = false -lock file = /var/lock/object6040.lock +lock file = %SWIFT_DATA_DIR%/run/object6040.lock diff --git a/functions b/functions index c7f65dbd..ae63436a 100644 --- a/functions +++ b/functions @@ -73,7 +73,6 @@ function die_if_not_set() { set +o xtrace local evar=$1; shift if ! is_set $evar || [ $exitcode != 0 ]; then - set +o xtrace echo $@ exit -1 fi @@ -81,6 +80,27 @@ function die_if_not_set() { } +# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] +# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in +# ``localrc`` or on the command line if necessary:: +# +# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html +# +# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh + +function export_proxy_variables() { + if [[ -n "$http_proxy" ]]; then + export http_proxy=$http_proxy + fi + if [[ -n "$https_proxy" ]]; then + export https_proxy=$https_proxy + fi + if [[ -n "$no_proxy" ]]; then + export no_proxy=$no_proxy + fi +} + + # Grab a numbered field from python prettytable output # Fields are numbered starting with 1 # Reverse syntax is supported: -1 is the last field, -2 is second to last, etc. @@ -155,6 +175,10 @@ function get_packages() { if [[ ! $file_to_parse =~ keystone ]]; then file_to_parse="${file_to_parse} keystone" fi + elif [[ $service == q-* ]]; then + if [[ ! $file_to_parse =~ quantum ]]; then + file_to_parse="${file_to_parse} quantum" + fi fi done @@ -221,10 +245,16 @@ GetOSVersion() { os_VENDOR=$(lsb_release -i -s) os_RELEASE=$(lsb_release -r -s) os_UPDATE="" + os_PACKAGE="rpm" if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then os_PACKAGE="deb" - else - os_PACKAGE="rpm" + elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then + lsb_release -d -s | grep -q openSUSE + if [[ $? 
-eq 0 ]]; then + os_VENDOR="openSUSE" + fi + elif [[ $os_VENDOR =~ Red.*Hat ]]; then + os_VENDOR="Red Hat" fi os_CODENAME=$(lsb_release -c -s) elif [[ -r /etc/redhat-release ]]; then @@ -246,6 +276,23 @@ GetOSVersion() { os_VENDOR="" done os_PACKAGE="rpm" + elif [[ -r /etc/SuSE-release ]]; then + for r in openSUSE "SUSE Linux"; do + if [[ "$r" = "SUSE Linux" ]]; then + os_VENDOR="SUSE LINUX" + else + os_VENDOR=$r + fi + + if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then + os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'` + os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'` + os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'` + break + fi + os_VENDOR="" + done + os_PACKAGE="rpm" fi export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME } @@ -297,6 +344,15 @@ function GetDistro() { elif [[ "$os_VENDOR" =~ (Fedora) ]]; then # For Fedora, just use 'f' and the release DISTRO="f$os_RELEASE" + elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then + DISTRO="opensuse-$os_RELEASE" + elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then + # For SLE, also use the service pack + if [[ -z "$os_UPDATE" ]]; then + DISTRO="sle${os_RELEASE}" + else + DISTRO="sle${os_RELEASE}sp${os_UPDATE}" + fi else # Catch-all for now is Vendor + Release + Update DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE" @@ -305,6 +361,60 @@ function GetDistro() { } +# Determine if current distribution is an Ubuntu-based distribution. +# It will also detect non-Ubuntu but Debian-based distros; this is not an issue +# since Debian and Ubuntu should be compatible. +# is_ubuntu +function is_ubuntu { + if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion + fi + + [ "$os_PACKAGE" = "deb" ] +} + + +# Determine if current distribution is a Fedora-based distribution +# (Fedora, RHEL, CentOS). +# is_fedora +function is_fedora { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ] +} + + +# Determine if current distribution is a SUSE-based distribution +# (openSUSE, SLE). +# is_suse +function is_suse { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ] +} + + +# Exit after outputting a message about the distribution not being supported. +# exit_distro_not_supported [optional-string-telling-what-is-missing] +function exit_distro_not_supported { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + if [ $# -gt 0 ]; then + echo "Support for $DISTRO is incomplete: no support for $@" + else + echo "Support for $DISTRO is incomplete." + fi + + exit 1 +} + + # git clone only if directory doesn't exist already. Since ``DEST`` might not # be owned by the installation user, we create the directory and change the # ownership to the proper user. 
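Taken together, these helpers replace scattered ``$os_PACKAGE`` tests with a single dispatch idiom. A minimal sketch of the pattern, the same shape the reworked ``install_package`` below uses (``some-package`` is a placeholder name, not a real prerequisite):

    # Dispatch on the detected distro family, failing loudly on
    # anything unrecognized rather than guessing a package manager.
    if is_ubuntu; then
        apt_get install some-package
    elif is_fedora; then
        yum_install some-package
    elif is_suse; then
        zypper_install some-package
    else
        exit_distro_not_supported "installing some-package"
    fi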
@@ -370,7 +480,7 @@ function inicomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" } # Uncomment an option in an INI file @@ -379,7 +489,7 @@ function iniuncomment() { local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" } @@ -390,10 +500,20 @@ function iniget() { local file=$1 local section=$2 local option=$3 local line - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" $file) + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") echo ${line#*=} } +# Determine if the given option is present in the INI file +# ini_has_option config-file section option +function ini_has_option() { + local file=$1 + local section=$2 + local option=$3 + local line + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + [ -n "$line" ] +} # Set an option in an INI file # iniset config-file section option value @@ -402,18 +522,18 @@ function iniset() { local section=$2 local option=$3 local value=$4 - if ! grep -q "^\[$section\]" $file; then + if ! grep -q "^\[$section\]" "$file"; then # Add section at the end - echo -e "\n[$section]" >>$file + echo -e "\n[$section]" >>"$file" fi - if [[ -z "$(iniget $file $section $option)" ]]; then + if ! ini_has_option "$file" "$section" "$option"; then # Add it sed -i -e "/^\[$section\]/ a\\ $option = $value -" $file +" "$file" else # Replace it - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" $file + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=[ \t]*\).*$|\1$value|" "$file" fi } @@ -462,7 +582,7 @@ function _cleanup_service_list () { # ``ENABLED_SERVICES`` list, if they are not already present. # # For example: -# enable_service n-vol +# enable_service qpid # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). @@ -484,7 +604,7 @@ function enable_service() { # ``ENABLED_SERVICES`` list, if they are present. # # For example: -# disable_service n-vol +# disable_service rabbit # # This function does not know about the special cases # for nova, glance, and quantum built into is_service_enabled(). @@ -513,8 +633,8 @@ function disable_all_services() { # Remove all services starting with '-'. For example, to install all default -# services except nova-volume (n-vol) set in ``localrc``: -# ENABLED_SERVICES+=",-n-vol" +# services except rabbit set in ``localrc``: +# ENABLED_SERVICES+=",-rabbit" # Uses global ``ENABLED_SERVICES`` # disable_negated_services function disable_negated_services() { @@ -532,17 +652,17 @@ function disable_negated_services() { # Distro-agnostic package installer # install_package package [package ...] function install_package() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update NO_UPDATE_REPOS=True apt_get install "$@" - else + elif is_fedora; then yum_install "$@" + elif is_suse; then + zypper_install "$@" + else + exit_distro_not_supported "installing packages" fi } @@ -557,12 +677,13 @@ function is_package_installed() { if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi + if [[ "$os_PACKAGE" = "deb" ]]; then dpkg -l "$@" > /dev/null - return $?
- else + elif [[ "$os_PACKAGE" = "rpm" ]]; then rpm --quiet -q "$@" - return $? + else + exit_distro_not_supported "finding if a package is installed" fi } @@ -571,10 +692,7 @@ function is_package_installed() { # is_set env-var function is_set() { local var=\$"$1" - if eval "[ -z \"$var\" ]"; then - return 1 - fi - return 0 + eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depend on this } @@ -593,11 +711,7 @@ function pip_install { SUDO_PIP="env" else SUDO_PIP="sudo" - if [[ "$os_PACKAGE" = "deb" ]]; then - CMD_PIP=/usr/bin/pip - else - CMD_PIP=/usr/bin/pip-python - fi + CMD_PIP=$(get_pip_command) fi if [[ "$PIP_USE_MIRRORS" != "False" ]]; then PIP_MIRROR_OPT="--use-mirrors" @@ -613,10 +727,7 @@ function pip_install { # Service wrapper to restart services # restart_service service-name function restart_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 restart else sudo /sbin/service $1 restart @@ -627,24 +738,33 @@ function restart_service() { # Helper to launch a service in a named screen # screen_it service "command-line" function screen_it { - NL=`echo -ne '\015'` SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + SCREEN_DEV=`trueorfalse True $SCREEN_DEV` + if is_service_enabled $1; then # Append the service to the screen rc file screen_rc "$1" "$2" screen -S $SCREEN_NAME -X screen -t $1 - # sleep to allow bash to be ready to be send the command - we are - # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 1.5 if [[ -n ${SCREEN_LOGDIR} ]]; then screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log screen -S $SCREEN_NAME -p $1 -X log on ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log fi - screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL" + + if [[ "$SCREEN_DEV" = "True" ]]; then + # sleep to allow bash to be ready to receive the command - we are + # creating a new window in screen and then sending characters, so if + # bash isn't running by the time we send the command, nothing happens + sleep 1.5 + + NL=`echo -ne '\015'` + screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL" + else + screen -S $SCREEN_NAME -p $1 -X exec /bin/bash -c "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"" + fi fi } @@ -669,6 +789,47 @@ function screen_rc { fi } +# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME +# This is used by service_check once all of the screen_it calls have finished +# init_service_check +function init_service_check() { + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + mkdir -p "$SERVICE_DIR/$SCREEN_NAME" + fi + + rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure +} + +# Helper to get the status of each running service +# service_check +function service_check() { + local service + local failures + SCREEN_NAME=${SCREEN_NAME:-stack} + SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} + + + if [[ !
-d "$SERVICE_DIR/$SCREEN_NAME" ]]; then + echo "No service status directory found" + return + fi + + # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME + failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null` + + for service in $failures; do + service=`basename $service` + service=${service::-8} + echo "Error: Service $service is not running" + done + + if [ -n "$failures" ]; then + echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh" + fi +} # ``pip install`` the dependencies of the package before ``setup.py develop`` # so pip and not distutils processes the dependency chain @@ -698,10 +859,7 @@ function setup_develop() { # Service wrapper to start services # start_service service-name function start_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 start else sudo /sbin/service $1 start @@ -712,10 +870,7 @@ function start_service() { # Service wrapper to stop services # stop_service service-name function stop_service() { - if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion - fi - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then sudo /usr/sbin/service $1 stop else sudo /sbin/service $1 stop @@ -841,6 +996,20 @@ function upload_image() { fi } +# Set the database backend to use +# When called from stackrc/localrc DATABASE_BACKENDS has not been +# initialized yet, just save the configuration selection and call back later +# to validate it. +# $1 The name of the database backend to use (mysql, postgresql, ...) +function use_database { + if [[ -z "$DATABASE_BACKENDS" ]]; then + # The backends haven't initialized yet, just save the selection for now + DATABASE_TYPE=$1 + else + use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 + fi +} + # Toggle enable/disable_service for services that must run exclusive of each other # $1 The name of a variable containing a space-separated list of services # $2 The name of a variable in which to store the enabled service's name @@ -857,6 +1026,14 @@ function use_exclusive_service { return 0 } +# Wait for an HTTP server to start answering requests +# wait_for_service timeout url +function wait_for_service() { + local timeout=$1 + local url=$2 + timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done" +} + # Wrapper for ``yum`` to set proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy` # yum_install package [package ...] @@ -872,7 +1049,11 @@ function yum_install() { # ping check # Uses globals ``ENABLED_SERVICES`` function ping_check() { - _ping_check_novanet "$1" $2 $3 + if is_service_enabled quantum; then + _ping_check_quantum "$1" $2 $3 $4 + return + fi + _ping_check_novanet "$1" $2 $3 $4 } # ping check for nova @@ -881,19 +1062,39 @@ function _ping_check_novanet() { local from_net=$1 local ip=$2 local boot_timeout=$3 + local expected=${4:-"True"} + local check_command="" MULTI_HOST=`trueorfalse False $MULTI_HOST` if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then sleep $boot_timeout return fi - if ! timeout $boot_timeout sh -c "while ! ping -c1 -w1 $ip; do sleep 1; done"; then - echo "Couldn't ping server" + if [[ "$expected" = "True" ]]; then + check_command="while ! ping -c1 -w1 $ip; do sleep 1; done" + else + check_command="while ping -c1 -w1 $ip; do sleep 1; done" + fi + if ! 
timeout $boot_timeout sh -c "$check_command"; then + if [[ "$expected" = "True" ]]; then + echo "[Fail] Couldn't ping server" + else + echo "[Fail] Could ping server" + fi exit 1 fi } # ssh check + function ssh_check() { + if is_service_enabled quantum; then + _ssh_check_quantum "$1" $2 $3 $4 $5 + return + fi + _ssh_check_novanet "$1" $2 $3 $4 $5 +} + +function _ssh_check_novanet() { local NET_NAME=$1 local KEY_FILE=$2 local FLOATING_IP=$3 @@ -906,6 +1107,66 @@ function ssh_check() { fi } + +# zypper wrapper to set arguments correctly +# zypper_install package [package ...] +function zypper_install() { + [[ "$OFFLINE" = "True" ]] && return + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + zypper --non-interactive install --auto-agree-with-licenses "$@" +} + + +# Add a user to a group. +# add_user_to_group user group +function add_user_to_group() { + local user=$1 + local group=$2 + + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + # SLE11 and openSUSE 12.2 don't have the usual usermod + if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then + sudo usermod -a -G "$group" "$user" + else + sudo usermod -A "$group" "$user" + fi +} + + +# Get the path to the directory where python executables are installed. +# get_python_exec_prefix +function get_python_exec_prefix() { + if is_fedora; then + echo "/usr/bin" + else + echo "/usr/local/bin" + fi +} + +# Get the location of the $module-rootwrap executables, where module is cinder +# or nova. +# get_rootwrap_location module
+function get_rootwrap_location() { + local module=$1 + + echo "$(get_python_exec_prefix)/$module-rootwrap" +} + +# Get the path to the pip command. +# get_pip_command +function get_pip_command() { + if is_fedora; then + which pip-python + else + which pip + fi +} + # Restore xtrace $XTRACE diff --git a/lib/baremetal b/lib/baremetal new file mode 100644 index 00000000..26593867 --- /dev/null +++ b/lib/baremetal @@ -0,0 +1,435 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2012 Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# This file provides devstack with the environment and utilities to +# control nova-compute's baremetal driver. +# It sets reasonable defaults to run within a single host, +# using virtual machines in place of physical hardware. +# However, by changing just a few options, devstack+baremetal can in fact +# control physical hardware resources on the same network, if you know +# the MAC address(es) and IPMI credentials. +# +# At a minimum, to enable the baremetal driver, you must set these in localrc: +# VIRT_DRIVER=baremetal +# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" +# +# +# We utilize diskimage-builder to create a ramdisk, and then +# baremetal driver uses that to push a disk image onto the node(s).
+# +# Below we define various defaults which control the behavior of the +# baremetal compute service, and inform it of the hardware it will control. +# +# Below that, various functions are defined, which are called by devstack +# in the following order: +# +# before nova-cpu starts: +# - prepare_baremetal_toolchain +# - configure_baremetal_nova_dirs +# +# after nova and glance have started: +# - build_and_upload_baremetal_deploy_k_and_r $token +# - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID +# - upload_baremetal_image $url $token +# - add_baremetal_node + + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Sub-driver settings +# ------------------- + +# sub-driver to use for kernel deployment +# - nova.virt.baremetal.pxe.PXE +# - nova.virt.baremetal.tilera.TILERA
+BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE} + +# sub-driver to use for remote power management +# - nova.virt.baremetal.fake.FakePowerManager, for manual power control +# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI +# - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware +BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager} + + +# These should be customized to your environment and hardware +# ----------------------------------------------------------- + +# whether to create a fake environment, e.g. for devstack-gate +BM_USE_FAKE_ENV=`trueorfalse False $BM_USE_FAKE_ENV` + +# Extra options to pass to bm_poseur +# change the bridge name or IP: --bridge br99 --bridge-ip 192.0.2.1 +# change the virtualization type: --engine qemu +BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-} + +# BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE +if [ "$BM_USE_FAKE_ENV" = "True" ]; then + BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99} + BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-192.0.2.32,192.0.2.48} +else + BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-eth0} + # if testing on a physical network, + # BM_DNSMASQ_RANGE must be changed to suit your network + BM_DNSMASQ_RANGE=${BM_DNSMASQ_RANGE:-} +fi + +# BM_DNSMASQ_DNS provides the dns server to bootstrap clients +BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-} + +# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot. +# This is passed to dnsmasq along with the kernel/ramdisk to +# deploy via PXE. +BM_FIRST_MAC=${BM_FIRST_MAC:-} + +# BM_SECOND_MAC is only important if the host has >1 NIC. +BM_SECOND_MAC=${BM_SECOND_MAC:-} + +# Hostname for the baremetal nova-compute node, if not run on this host +BM_HOSTNAME=${BM_HOSTNAME:-$(hostname -f)} + +# BM_PM_* options are only necessary if BM_POWER_MANAGER=...IPMI +BM_PM_ADDR=${BM_PM_ADDR:-0.0.0.0} +BM_PM_USER=${BM_PM_USER:-user} +BM_PM_PASS=${BM_PM_PASS:-pass} + +# BM_FLAVOR_* options are arbitrary and not necessarily related to physical +# hardware capacity. These can be changed if you are testing +# BaremetalHostManager with multiple nodes and different flavors. +BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64} +BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1} +BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024} +BM_FLAVOR_ROOT_DISK=${BM_FLAVOR_ROOT_DISK:-10} +BM_FLAVOR_EPHEMERAL_DISK=${BM_FLAVOR_EPHEMERAL_DISK:-0} +BM_FLAVOR_SWAP=${BM_FLAVOR_SWAP:-1} +BM_FLAVOR_NAME=${BM_FLAVOR_NAME:-bm.small} +BM_FLAVOR_ID=${BM_FLAVOR_ID:-11} +BM_FLAVOR_ARCH=${BM_FLAVOR_ARCH:-$BM_CPU_ARCH} + + +# Below this, we set some path and filenames. +# Defaults are probably sufficient.
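+# +# As an illustration only (not a DevStack default), a minimal localrc for a +# single-host fake-baremetal run might contain nothing more than: +# +# VIRT_DRIVER=baremetal +# ENABLED_SERVICES="$ENABLED_SERVICES,baremetal" +# BM_USE_FAKE_ENV=True +#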
+BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder} +BM_POSEUR_DIR=${BM_POSEUR_DIR:-$DEST/bm_poseur} + +BM_HOST_CURRENT_KERNEL=$(uname -r) +BM_DEPLOY_RAMDISK=${BM_DEPLOY_RAMDISK:-bm-deploy-$BM_HOST_CURRENT_KERNEL-initrd} +BM_DEPLOY_KERNEL=${BM_DEPLOY_KERNEL:-bm-deploy-$BM_HOST_CURRENT_KERNEL-vmlinuz} + +# If you need to add any extra flavors to the deploy ramdisk image +# eg, specific network drivers, specify them here +BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-} + +# set URL and version for google shell-in-a-box +BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz} + + +# Functions +# --------- + +# Check if baremetal is properly enabled +# Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES +# does not contain "baremetal" +function is_baremetal() { + if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then + return 0 + fi + return 1 +} + +# Install diskimage-builder and shell-in-a-box +# so that we can build the deployment kernel & ramdisk +function prepare_baremetal_toolchain() { + git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH + git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH + + local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX) + if [[ ! -e $DEST/$shellinabox_basename ]]; then + cd $DEST + wget $BM_SHELL_IN_A_BOX + fi + if [[ ! -d $DEST/${shellinabox_basename%%.tar.gz} ]]; then + cd $DEST + tar xzf $shellinabox_basename + fi + if [[ ! $(which shellinaboxd) ]]; then + cd $DEST/${shellinabox_basename%%.tar.gz} + ./configure + make + sudo make install + fi +} + +# set up virtualized environment for devstack-gate testing +function create_fake_baremetal_env() { + local bm_poseur="$BM_POSEUR_DIR/bm_poseur" + # TODO(deva): add support for >1 VM + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-vm + BM_FIRST_MAC=$(sudo $bm_poseur get-macs) + + # NOTE: there is currently a limitation in baremetal driver + # that requires second MAC even if it is not used. + # Passing a fake value allows this to work. + # TODO(deva): remove this after driver issue is fixed. + BM_SECOND_MAC='12:34:56:78:90:12' +} + +function cleanup_fake_baremetal_env() { + local bm_poseur="$BM_POSEUR_DIR/bm_poseur" + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm + sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge +} + +# prepare various directories needed by baremetal hypervisor +function configure_baremetal_nova_dirs() { + # ensure /tftpboot is prepared + sudo mkdir -p /tftpboot + sudo mkdir -p /tftpboot/pxelinux.cfg + sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/ + sudo chown -R $STACK_USER:libvirtd /tftpboot + + # ensure $NOVA_STATE_PATH/baremetal is prepared + sudo mkdir -p $NOVA_STATE_PATH/baremetal + sudo mkdir -p $NOVA_STATE_PATH/baremetal/console + sudo mkdir -p $NOVA_STATE_PATH/baremetal/dnsmasq + sudo touch $NOVA_STATE_PATH/baremetal/dnsmasq/dnsmasq-dhcp.host + sudo chown -R $STACK_USER $NOVA_STATE_PATH/baremetal + + # ensure dnsmasq is installed but not running + # because baremetal driver will reconfigure and restart this as needed + is_package_installed dnsmasq || install_package dnsmasq + stop_service dnsmasq +} + +# build deploy kernel+ramdisk, then upload them to glance +# this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID +function upload_baremetal_deploy() { + token=$1 + + if [ ! 
-e $TOP_DIR/files/$BM_DEPLOY_KERNEL -a -e /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL ]; then + sudo cp /boot/vmlinuz-$BM_HOST_CURRENT_KERNEL $TOP_DIR/files/$BM_DEPLOY_KERNEL + sudo chmod a+r $TOP_DIR/files/$BM_DEPLOY_KERNEL + fi + if [ ! -e $TOP_DIR/files/$BM_DEPLOY_RAMDISK ]; then + $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \ + -o $TOP_DIR/files/$BM_DEPLOY_RAMDISK -k $BM_HOST_CURRENT_KERNEL + fi + + # load them into glance + BM_DEPLOY_KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_KERNEL \ + --public --disk-format=aki \ + < $TOP_DIR/files/$BM_DEPLOY_KERNEL | grep ' id ' | get_field 2) + BM_DEPLOY_RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $BM_DEPLOY_RAMDISK \ + --public --disk-format=ari \ + < $TOP_DIR/files/$BM_DEPLOY_RAMDISK | grep ' id ' | get_field 2) +} + +# create a basic baremetal flavor, associated with deploy kernel & ramdisk +# +# Usage: create_baremetal_flavor +function create_baremetal_flavor() { + aki=$1 + ari=$2 + nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \ + $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU + nova flavor-key $BM_FLAVOR_NAME set \ + cpu_arch=$BM_FLAVOR_ARCH \ + deploy_kernel_id=$aki \ + deploy_ramdisk_id=$ari +} + +# pull run-time kernel/ramdisk out of disk image and load into glance +# note that $file is currently expected to be in qcow2 format +# Sets KERNEL_ID and RAMDISK_ID +# +# Usage: extract_and_upload_k_and_r_from_image $token $file +function extract_and_upload_k_and_r_from_image() { + token=$1 + file=$2 + image_name=$(basename "$file" ".qcow2") + + # this call returns the file names as "$kernel,$ramdisk" + out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \ + -x -d $TOP_DIR/files -o bm-deploy -i $file) + if [ $? -ne 0 ]; then + die "Failed to get kernel and ramdisk from $file" + fi + XTRACE=$(set +o | grep xtrace) + set +o xtrace + out=$(echo "$out" | tail -1) + $XTRACE + OUT_KERNEL=${out%%,*} + OUT_RAMDISK=${out##*,} + + # load them into glance + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-kernel \ + --public --disk-format=aki \ + < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name $image_name-initrd \ + --public --disk-format=ari \ + < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2) +} + + +# Re-implementation of devstack's "upload_image" function +# +# Takes the same parameters, but has some peculiarities which made it +# easier to create a separate method, rather than complicate the logic +# of the existing function. +function upload_baremetal_image() { + local image_url=$1 + local token=$2 + + # Create a directory for the downloaded image tarballs. + mkdir -p $FILES/images + + # Downloads the image (uec ami+aki style), then extracts it. + IMAGE_FNAME=`basename "$image_url"` + if [[ ! -f $FILES/$IMAGE_FNAME || \ + "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then + wget -c $image_url -O $FILES/$IMAGE_FNAME + if [[ $? 
-ne 0 ]]; then + echo "Not found: $image_url" + return + fi + fi + + local KERNEL="" + local RAMDISK="" + local DISK_FORMAT="" + local CONTAINER_FORMAT="" + case "$IMAGE_FNAME" in + *.tar.gz|*.tgz) + # Extract ami and aki files + [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] && + IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" || + IMAGE_NAME="${IMAGE_FNAME%.tgz}" + xdir="$FILES/images/$IMAGE_NAME" + rm -Rf "$xdir"; + mkdir "$xdir" + tar -zxf $FILES/$IMAGE_FNAME -C "$xdir" + KERNEL=$(for f in "$xdir/"*-vmlinuz* "$xdir/"aki-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + RAMDISK=$(for f in "$xdir/"*-initrd* "$xdir/"ari-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + IMAGE=$(for f in "$xdir/"*.img "$xdir/"ami-*/image; do + [ -f "$f" ] && echo "$f" && break; done; true) + if [[ -z "$IMAGE_NAME" ]]; then + IMAGE_NAME=$(basename "$IMAGE" ".img") + fi + DISK_FORMAT=ami + CONTAINER_FORMAT=ami + ;; + *.qcow2) + IMAGE="$FILES/${IMAGE_FNAME}" + IMAGE_NAME=$(basename "$IMAGE" ".qcow2") + DISK_FORMAT=qcow2 + CONTAINER_FORMAT=bare + ;; + *) echo "Do not know what to do with $IMAGE_FNAME"; false;; + esac + + if [ "$CONTAINER_FORMAT" = "bare" ]; then + extract_and_upload_k_and_r_from_image $token $IMAGE + elif [ "$CONTAINER_FORMAT" = "ami" ]; then + KERNEL_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-kernel" --public \ + --container-format aki \ + --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2) + RAMDISK_ID=$(glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "$IMAGE_NAME-ramdisk" --public \ + --container-format ari \ + --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2) + else + # TODO(deva): add support for other image types + return + fi + + glance \ + --os-auth-token $token \ + --os-image-url http://$GLANCE_HOSTPORT \ + image-create \ + --name "${IMAGE_NAME%.img}" --public \ + --container-format $CONTAINER_FORMAT \ + --disk-format $DISK_FORMAT \ + ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \ + ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}" + + # override DEFAULT_IMAGE_NAME so that tempest can find the image + # that we just uploaded in glance + DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}" +} + +function clear_baremetal_of_all_nodes() { + list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' ) + for node in $list + do + nova baremetal-node-delete $node + done +} + +# inform nova-baremetal about nodes, MACs, etc +# Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified +# +# Usage: add_baremetal_node +function add_baremetal_node() { + mac_1=${1:-$BM_FIRST_MAC} + mac_2=${2:-$BM_SECOND_MAC} + + id=$(nova baremetal-node-create \ + --pm_address="$BM_PM_ADDR" \ + --pm_user="$BM_PM_USER" \ + --pm_password="$BM_PM_PASS" \ + "$BM_HOSTNAME" \ + "$BM_FLAVOR_CPU" \ + "$BM_FLAVOR_RAM" \ + "$BM_FLAVOR_ROOT_DISK" \ + "$mac_1" \ + | grep ' id ' | get_field 2 ) + [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node" + id2=$(nova baremetal-add-interface "$id" "$mac_2" ) + [ $? 
-eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id" +} + + +# Restore xtrace +$XTRACE diff --git a/lib/ceilometer b/lib/ceilometer index 2b014b05..bc37d92b 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -1,23 +1,24 @@ # lib/ceilometer -# Install and start Ceilometer service +# Install and start **Ceilometer** service + # To enable, add the following to localrc # ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api - # Dependencies: # - functions # - OS_AUTH_URL for auth in api # - DEST set to the destination directory # - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api +# - STACK_USER service user # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_ceilometer +# configure_ceilometer +# init_ceilometer +# start_ceilometer +# stop_ceilometer +# cleanup_ceilometer # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -27,17 +28,20 @@ set +o xtrace # Defaults # -------- -# set up default directories +# Set up default directories CEILOMETER_DIR=$DEST/ceilometer +CEILOMETERCLIENT_DIR=$DEST/python-ceilometerclient +CEILOMETER_CONF_DIR=/etc/ceilometer +CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf +CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api +CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} + # Support potential entry-points console scripts if [ -d $CEILOMETER_DIR/bin ] ; then CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin else - CEILOMETER_BIN_DIR=/usr/local/bin + CEILOMETER_BIN_DIR=$(get_python_exec_prefix) fi -CEILOMETER_CONF_DIR=/etc/ceilometer -CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf -CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api # cleanup_ceilometer() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up @@ -45,6 +49,11 @@ function cleanup_ceilometer() { mongo ceilometer --eval "db.dropDatabase();" } +# configure_ceilometerclient() - Set config files, create data dirs, etc +function configure_ceilometerclient() { + setup_develop $CEILOMETERCLIENT_DIR +} + # configure_ceilometer() - Set config files, create data dirs, etc function configure_ceilometer() { setup_develop $CEILOMETER_DIR @@ -64,25 +73,47 @@ function configure_ceilometer() { # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + # the compute and central agents need these credentials in order to + # call out to the public nova and glance APIs + iniset $CEILOMETER_CONF DEFAULT os_username ceilometer + iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD + iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF DEFAULT os_auth_url $OS_AUTH_URL + iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer iniset $CEILOMETER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR cleanup_ceilometer } +# init_ceilometer() - Initialize etc. 
+function init_ceilometer() { + # Create cache dir + sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR + rm -f $CEILOMETER_AUTH_CACHE_DIR/* +} + # install_ceilometer() - Collect source and prepare function install_ceilometer() { git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH } +# install_ceilometerclient() - Collect source and prepare +function install_ceilometerclient() { + git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH +} + # start_ceilometer() - Start running processes, including screen function start_ceilometer() { screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\"" - screen_it ceilometer-acentral "export OS_USERNAME=ceilometer OS_PASSWORD=$SERVICE_PASSWORD OS_TENANT_NAME=$SERVICE_TENANT_NAME OS_AUTH_URL=$OS_AUTH_URL && cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" } diff --git a/lib/cinder b/lib/cinder index c2cf15bf..4d1ab420 100644 --- a/lib/cinder +++ b/lib/cinder @@ -1,20 +1,20 @@ # lib/cinder -# Install and start Cinder volume service +# Install and start **Cinder** volume service # Dependencies: # - functions -# - DEST, DATA_DIR must be defined +# - DEST, DATA_DIR, STACK_USER must be defined # SERVICE_{TENANT_NAME|PASSWORD} must be defined # ``KEYSTONE_TOKEN_FORMAT`` must be defined # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_cinder +# configure_cinder +# init_cinder +# start_cinder +# stop_cinder +# cleanup_cinder # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -24,30 +24,88 @@ set +o xtrace # Defaults # -------- +# set up default driver +CINDER_DRIVER=${CINDER_DRIVER:-default} + # set up default directories CINDER_DIR=$DEST/cinder CINDERCLIENT_DIR=$DEST/python-cinderclient CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} +CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} + CINDER_CONF_DIR=/etc/cinder CINDER_CONF=$CINDER_CONF_DIR/cinder.conf -CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} +CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini + +# Public facing bits +CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} +CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} +CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} +CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Support entry points installation of console scripts if [[ -d $CINDER_DIR/bin ]]; then CINDER_BIN_DIR=$CINDER_DIR/bin else - CINDER_BIN_DIR=/usr/local/bin + CINDER_BIN_DIR=$(get_python_exec_prefix) fi # Name of the lvm volume group to use/create for iscsi volumes VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} +# _clean_volume_group removes all cinder volumes from the specified volume group +# _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX +function _clean_volume_group() { + local vg=$1 + local vg_prefix=$2 + # Clean out existing volumes + for lv in `sudo lvs 
--noheadings -o lv_name $vg`; do + # vg_prefix prefixes the LVs we want + if [[ "${lv#$vg_prefix}" != "$lv" ]]; then + sudo lvremove -f $vg/$lv + fi + done +} + # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_cinder() { - # This function intentionally left blank - : + # ensure the volume group is cleared up because failures might + # leave dead volumes in the group + TARGETS=$(sudo tgtadm --op show --mode target) + if [ $? -ne 0 ]; then + # If the tgt driver isn't running this won't work obviously, + # so check the response and restart if need be + echo "tgtd seems to be in a bad state, restarting..." + if is_ubuntu; then + restart_service tgt + else + restart_service tgtd + fi + TARGETS=$(sudo tgtadm --op show --mode target) + fi + + if [[ -n "$TARGETS" ]]; then + iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') ) + for i in "${iqn_list[@]}"; do + echo removing iSCSI target: $i + sudo tgt-admin --delete $i + done + fi + + if is_service_enabled cinder; then + sudo rm -rf $CINDER_STATE_PATH/volumes/* + fi + + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi + + # Campsite rule: leave behind a volume group at least as clean as we found it + _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX } # configure_cinder() - Set config files, create data dirs, etc @@ -58,16 +116,12 @@ function configure_cinder() { if [[ ! -d $CINDER_CONF_DIR ]]; then sudo mkdir -p $CINDER_CONF_DIR fi - sudo chown `whoami` $CINDER_CONF_DIR + sudo chown $STACK_USER $CINDER_CONF_DIR cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR # Set the paths of certain binaries - if [[ "$os_PACKAGE" = "deb" ]]; then - CINDER_ROOTWRAP=/usr/local/bin/cinder-rootwrap - else - CINDER_ROOTWRAP=/usr/bin/cinder-rootwrap - fi + CINDER_ROOTWRAP=$(get_rootwrap_location cinder) # If Cinder ships the new rootwrap filters files, deploy them # (owned by root) and add a parameter to $CINDER_ROOTWRAP @@ -98,7 +152,6 @@ function configure_cinder() { sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap - CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT @@ -106,10 +159,7 @@ function configure_cinder() { iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD - - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR - fi + iniset $CINDER_API_PASTE_INI filter:authtoken signing_dir $CINDER_AUTH_CACHE_DIR cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF iniset $CINDER_CONF DEFAULT auth_strategy keystone @@ -122,90 +172,152 @@ function configure_cinder() { iniset $CINDER_CONF DEFAULT sql_connection $dburl iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}" - iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.openstack.volume.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH + if is_service_enabled
tls-proxy; then + # Set the service port for a proxy to take the original + iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + fi + if [ "$SYSLOG" != "False" ]; then iniset $CINDER_CONF DEFAULT use_syslog True fi - if is_service_enabled qpid ; then - iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $CINDER_CONF DEFAULT rpc_backend nova.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST - iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - fi + iniset_rpc_backend cinder $CINDER_CONF DEFAULT if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF DEFAULT secure_delete False + iniset $CINDER_CONF DEFAULT volume_clear none fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" - iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_id)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $CINDER_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s %(instance)s" + iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" + fi + + if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then + ( + set -u + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver" + iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL" + iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME" + iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD" + iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER" + iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH" + ) + elif [ "$CINDER_DRIVER" == "sheepdog" ]; then + iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver" fi } +# create_cinder_accounts() - Set up common required cinder accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service cinder admin # if enabled + +# Migrated from keystone_data.sh +create_cinder_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Cinder + if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then + CINDER_USER=$(keystone user-create \ + --name=cinder \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=cinder@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $CINDER_USER \ + --role_id $ADMIN_ROLE + if [[ 
"$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + CINDER_SERVICE=$(keystone service-create \ + --name=cinder \ + --type=volume \ + --description="Cinder Volume Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $CINDER_SERVICE \ + --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" + fi + fi +} + +# create_cinder_cache_dir() - Part of the init_cinder() process +function create_cinder_cache_dir() { + # Create cache dir + sudo mkdir -p $CINDER_AUTH_CACHE_DIR + sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR + rm -f $CINDER_AUTH_CACHE_DIR/* +} + +create_cinder_volume_group() { + # Configure a default volume group called '`stack-volumes`' for the volume + # service if it does not yet exist. If you don't wish to use a file backed + # volume group, create your own volume group called ``stack-volumes`` before + # invoking ``stack.sh``. + # + # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. + + if ! sudo vgs $VOLUME_GROUP; then + VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} + + # Only create if the file doesn't already exists + [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE + + DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` + + # Only create if the loopback device doesn't contain $VOLUME_GROUP + if ! sudo vgs $VOLUME_GROUP; then + sudo vgcreate $VOLUME_GROUP $DEV + fi + fi + + mkdir -p $CINDER_STATE_PATH/volumes +} + # init_cinder() - Initialize database and volume group function init_cinder() { # Force nova volumes off NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//") if is_service_enabled $DATABASE_BACKENDS; then - # (re)create cinder database + # (Re)create cinder database recreate_database cinder utf8 - # (re)create cinder database + # Migrate cinder database $CINDER_BIN_DIR/cinder-manage db sync fi if is_service_enabled c-vol; then - # Configure a default volume group called '`stack-volumes`' for the volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called ``stack-volumes`` before - # invoking ``stack.sh``. - # - # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. - - if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi - fi - mkdir -p $CINDER_STATE_PATH/volumes + create_cinder_volume_group if sudo vgs $VOLUME_GROUP; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default start_service tgtd fi # Remove iscsi targets sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # VOLUME_NAME_PREFIX prefixes the LVs we want - if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - sudo lvremove -f $VOLUME_GROUP/$lv - fi - done + # Start with a clean volume group + _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX fi fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown `whoami` $CINDER_AUTH_CACHE_DIR - fi + create_cinder_cache_dir } # install_cinder() - Collect source and prepare @@ -217,7 +329,7 @@ function install_cinder() { # apply config.d approach (e.g. Oneiric does not have this) function _configure_tgt_for_config_d() { if [[ ! -d /etc/tgt/conf.d/ ]]; then - sudo mkdir /etc/tgt/conf.d + sudo mkdir -p /etc/tgt/conf.d echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf fi } @@ -225,24 +337,35 @@ function _configure_tgt_for_config_d() { # start_cinder() - Start running processes, including screen function start_cinder() { if is_service_enabled c-vol; then - if [[ "$os_PACKAGE" = "deb" ]]; then - _configure_tgt_for_config_d - if [[ ! -f /etc/tgt/conf.d/cinder.conf ]]; then - echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf - fi + _configure_tgt_for_config_d + if [[ ! -f /etc/tgt/conf.d/stack.conf ]]; then + echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/stack.conf + fi + if is_ubuntu; then # tgt in oneiric doesn't restart properly if tgtd isn't running # do it in two steps sudo stop tgt || true sudo start tgt - else + elif is_fedora; then # bypass redirection to systemctl during restart sudo /sbin/service --skip-redirect tgtd restart + elif is_suse; then + restart_service tgtd + else + # note for other distros: unstack.sh also uses the tgt/tgtd service + # name, and would need to be adjusted too + exit_distro_not_supported "restarting tgt" fi fi screen_it c-api "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" screen_it c-vol "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" screen_it c-sch "cd $CINDER_DIR && $CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" + + # Start proxies if enabled + if is_service_enabled c-api && is_service_enabled tls-proxy; then + start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT & + fi } # stop_cinder() - Stop running processes @@ -253,7 +376,11 @@ function stop_cinder() { done if is_service_enabled c-vol; then - stop_service tgt + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi fi } diff --git a/lib/database b/lib/database index 66fb36fb..07e37aef 100644 --- a/lib/database +++ b/lib/database @@ -62,15 +62,6 @@ function initialize_database_backends { return 0 } -# Set the database backend to use -# $1 The name of the database backend to use (mysql, postgresql, ...) -function use_database { - use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 && return 0 - ret=$? 
- echo "Invalid database '$1'" - return $ret -} - # Recreate a given database # $1 The name of the database # $2 The character set/encoding of the database diff --git a/lib/databases/mysql b/lib/databases/mysql index ed59290a..94aedc64 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -1,11 +1,11 @@ -# lib/mysql -# Functions to control the configuration and operation of the MySQL database backend +# lib/databases/mysql +# Functions to control the configuration and operation of the **MySQL** database backend # Dependencies: # DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -XTRACE=$(set +o | grep xtrace) +MY_XTRACE=$(set +o | grep xtrace) set +o xtrace register_database mysql @@ -20,21 +20,31 @@ function recreate_database_mysql { function configure_database_mysql { echo_summary "Configuring and starting MySQL" - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then MY_CONF=/etc/mysql/my.cnf MYSQL=mysql - else + elif is_fedora; then MY_CONF=/etc/my.cnf MYSQL=mysqld + elif is_suse; then + MY_CONF=/etc/my.cnf + MYSQL=mysql + else + exit_distro_not_supported "mysql configuration" fi # Start mysql-server - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service + if is_fedora || is_suse; then + # service is not started by default start_service $MYSQL - # Set the root password - only works the first time + fi + + # Set the root password - only works the first time. For Ubuntu, we already + # did that with debconf before installing the package. + if ! is_ubuntu; then sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi + # Update the DB to give user ‘$DATABASE_USER’@’%’ full control of the all databases: sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" @@ -53,11 +63,26 @@ function configure_database_mysql { default-storage-engine = InnoDB" $MY_CONF fi + # Turn on slow query log + sudo sed -i '/log.slow.queries/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +log-slow-queries = /var/log/mysql/mysql-slow.log" $MY_CONF + + # Log all queries (any query taking longer than 0 seconds) + sudo sed -i '/long.query.time/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +long-query-time = 0" $MY_CONF + + # Log all non-indexed queries + sudo sed -i '/log.queries.not.using.indexes/d' $MY_CONF + sudo sed -i -e "/^\[mysqld\]/ a \ +log-queries-not-using-indexes" $MY_CONF + restart_service $MYSQL } function install_database_mysql { - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then # Seed configuration with mysql password so that apt-get install doesn't # prompt us for a password upon install. cat < - # Set up default directories GLANCE_DIR=$DEST/glance GLANCECLIENT_DIR=$DEST/python-glanceclient @@ -46,7 +44,7 @@ GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json if [[ -d $GLANCE_DIR/bin ]]; then GLANCE_BIN_DIR=$GLANCE_DIR/bin else - GLANCE_BIN_DIR=/usr/local/bin + GLANCE_BIN_DIR=$(get_python_exec_prefix) fi # Glance connection info. Note the port must be specified. 
@@ -70,13 +68,6 @@ function configure_glanceclient() { setup_develop $GLANCECLIENT_DIR } -# durable_glance_queues() - Determine if RabbitMQ queues are durable or not -function durable_glance_queues() { - test `rabbitmqctl list_queues name durable | grep true | wc -l` -gt 0 && return 0 - test `rabbitmqctl list_exchanges name durable | grep true | wc -l` -gt 0 && return 0 - return 1 -} - # configure_glance() - Set config files, create data dirs, etc function configure_glance() { setup_develop $GLANCE_DIR @@ -84,7 +75,7 @@ function configure_glance() { if [[ ! -d $GLANCE_CONF_DIR ]]; then sudo mkdir -p $GLANCE_CONF_DIR fi - sudo chown `whoami` $GLANCE_CONF_DIR + sudo chown $STACK_USER $GLANCE_CONF_DIR # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF @@ -102,9 +93,7 @@ function configure_glance() { iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry - fi + iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug True @@ -127,16 +116,8 @@ function configure_glance() { iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - if [[ durable_glance_queues -eq 0 ]]; then - # This gets around https://bugs.launchpad.net/glance/+bug/1074132 - # that results in a g-api server becoming unresponsive during - # startup... - iniset $GLANCE_API_CONF DEFAULT rabbit_durable_queues True - fi - fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api fi + iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI @@ -158,7 +139,17 @@ function configure_glance() { iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON +} +# create_glance_cache_dir() - Part of the init_glance() process +function create_glance_cache_dir() { + # Create cache dir + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api + rm -f $GLANCE_AUTH_CACHE_DIR/api/* + sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry + sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry + rm -f $GLANCE_AUTH_CACHE_DIR/registry/* } # init_glance() - Initialize databases, etc. 
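With the PKI conditionals removed above, ``signing_dir`` is now written unconditionally. For reference, the ``keystone_authtoken`` section of ``glance-registry.conf`` comes out roughly like this (illustrative values only, assuming the usual ``service`` tenant and a ``/var/cache/glance`` auth cache dir; not literal output):

[keystone_authtoken]
admin_tenant_name = service
admin_user = glance
admin_password = secret
signing_dir = /var/cache/glance/registry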
@@ -171,18 +162,13 @@ function init_glance() { rm -rf $GLANCE_CACHE_DIR mkdir -p $GLANCE_CACHE_DIR - # (re)create glance database + # (Re)create glance database recreate_database glance utf8 + # Migrate glance database $GLANCE_BIN_DIR/glance-manage db_sync - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/api - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown `whoami` $GLANCE_AUTH_CACHE_DIR/registry - fi + create_glance_cache_dir } # install_glanceclient() - Collect source and prepare diff --git a/lib/heat b/lib/heat index efdcfad3..5b8b360a 100644 --- a/lib/heat +++ b/lib/heat @@ -1,19 +1,22 @@ # lib/heat -# Install and start Heat service +# Install and start **Heat** service + # To enable, add the following to localrc -# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng,h-meta +# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng # Dependencies: # - functions # stack.sh # --------- -# install_XXX -# configure_XXX -# init_XXX -# start_XXX -# stop_XXX -# cleanup_XXX +# install_heatclient +# install_heat +# configure_heatclient +# configure_heat +# init_heat +# start_heat +# stop_heat +# cleanup_heat # Save trace setting XTRACE=$(set +o | grep xtrace) @@ -46,20 +49,18 @@ function configure_heat() { if [[ ! -d $HEAT_CONF_DIR ]]; then sudo mkdir -p $HEAT_CONF_DIR fi - sudo chown `whoami` $HEAT_CONF_DIR + sudo chown $STACK_USER $HEAT_CONF_DIR HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST} HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000} HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST} HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001} - HEAT_METADATA_HOST=${HEAT_METADATA_HOST:-$SERVICE_HOST} - HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002} HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST} HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003} HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST} HEAT_API_PORT=${HEAT_API_PORT:-8004} - # cloudformation api + # Cloudformation API HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF iniset $HEAT_API_CFN_CONF DEFAULT debug True @@ -68,13 +69,7 @@ function configure_heat() { iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CFN_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CFN_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CFN_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI @@ -88,7 +83,7 @@ function configure_heat() { iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0 iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - # openstack api + # OpenStack API HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF iniset $HEAT_API_CONF DEFAULT debug True @@ -97,13 +92,7 @@ function configure_heat() { iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST iniset $HEAT_API_CONF DEFAULT bind_port 
$HEAT_API_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CONF DEFAULT HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI @@ -125,40 +114,17 @@ function configure_heat() { iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT + iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition + iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT local dburl database_connection_url dburl heat iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random` - if is_service_enabled rabbit; then - iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_ENGINE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_ENGINE_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi - - # metadata api - HEAT_METADATA_CONF=$HEAT_CONF_DIR/heat-metadata.conf - cp $HEAT_DIR/etc/heat/heat-metadata.conf $HEAT_METADATA_CONF - iniset $HEAT_METADATA_CONF DEFAULT debug True - inicomment $HEAT_METADATA_CONF DEFAULT log_file - iniset $HEAT_METADATA_CONF DEFAULT use_syslog $SYSLOG - iniset $HEAT_METADATA_CONF DEFAULT bind_host $HEAT_METADATA_HOST - iniset $HEAT_METADATA_CONF DEFAULT bind_port $HEAT_METADATA_PORT - - if is_service_enabled rabbit; then - iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_METADATA_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_METADATA_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi - - HEAT_METADATA_PASTE_INI=$HEAT_CONF_DIR/heat-metadata-paste.ini - cp $HEAT_DIR/etc/heat/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI + iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT - # cloudwatch api + # Cloudwatch API HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF iniset $HEAT_API_CW_CONF DEFAULT debug True @@ -167,13 +133,7 @@ function configure_heat() { iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT - if is_service_enabled rabbit; then - iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu - iniset $HEAT_API_CW_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $HEAT_API_CW_CONF DEFAULT rabbit_host $RABBIT_HOST - elif is_service_enabled qpid; then - iniset $HEAT_API_CW_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid - fi + iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini cp 
$HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI @@ -194,7 +154,7 @@ function init_heat() { # (re)create heat database recreate_database heat utf8 - $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $MYSQL_PASSWORD + $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD $HEAT_DIR/tools/nova_create_flavors.sh } @@ -214,13 +174,12 @@ function start_heat() { screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf" screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf" screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf" - screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf" } # stop_heat() - Stop running processes function stop_heat() { # Kill the heat screen windows - for serv in h-eng h-api-cfn h-api-cw h-meta; do + for serv in h-eng h-api-cfn h-api-cw; do screen -S $SCREEN_NAME -p $serv -X kill done } diff --git a/lib/horizon b/lib/horizon new file mode 100644 index 00000000..9180370b --- /dev/null +++ b/lib/horizon @@ -0,0 +1,153 @@ +# lib/horizon +# Functions to control the configuration and operation of the horizon service +# + +# Dependencies: +# ``functions`` file +# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined +# + +# ``stack.sh`` calls the entry points in this order: +# +# install_horizon +# configure_horizon +# init_horizon +# start_horizon +# stop_horizon +# cleanup_horizon + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults
# -------- + +# + +# Set up default directories +HORIZON_DIR=$DEST/horizon + +# Allow overriding the default Apache user and group; default to the +# current user and their default group. +APACHE_USER=${APACHE_USER:-$USER} +APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)} + + +# Entry Points +# ------------ + +# cleanup_horizon() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_horizon() { + # kill instances (nova) + # delete image files (glance) + # This function intentionally left blank + : +} + +# configure_horizon() - Set config files, create data dirs, etc +function configure_horizon() { + setup_develop $HORIZON_DIR +} + +# init_horizon() - Initialize databases, etc. +function init_horizon() { + # Remove stale session database. + rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 + + # ``local_settings.py`` is used to override horizon default settings. + local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py + cp $FILES/horizon_settings.py $local_settings + + # Initialize the horizon database (it stores sessions and notices shown to + # users). The user system is external (keystone).
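+ # (``--noinput`` keeps syncdb non-interactive, so Django does not stop to + # prompt for an initial superuser during a stack.sh run.)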
+ cd $HORIZON_DIR + python manage.py syncdb --noinput + cd $TOP_DIR + + # Create an empty directory that apache uses as docroot + sudo mkdir -p $HORIZON_DIR/.blackhole + + + if is_ubuntu; then + APACHE_NAME=apache2 + APACHE_CONF=sites-available/horizon + # Clean up the old config name + sudo rm -f /etc/apache2/sites-enabled/000-default + # Be a good citizen and use the distro tools here + sudo touch /etc/$APACHE_NAME/$APACHE_CONF + sudo a2ensite horizon + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi + elif is_fedora; then + APACHE_NAME=httpd + APACHE_CONF=conf.d/horizon.conf + sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf + elif is_suse; then + APACHE_NAME=apache2 + APACHE_CONF=vhosts.d/horizon.conf + # WSGI isn't enabled by default, enable it + sudo a2enmod wsgi + else + exit_distro_not_supported "apache configuration" + fi + + # Configure apache to run horizon + sudo sh -c "sed -e \" + s,%USER%,$APACHE_USER,g; + s,%GROUP%,$APACHE_GROUP,g; + s,%HORIZON_DIR%,$HORIZON_DIR,g; + s,%APACHE_NAME%,$APACHE_NAME,g; + s,%DEST%,$DEST,g; + \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" + +} + +# install_horizon() - Collect source and prepare +function install_horizon() { + # Apache installation, because we mark it NOPRIME + if is_ubuntu; then + # Install apache2, which is NOPRIME'd + install_package apache2 libapache2-mod-wsgi + elif is_fedora; then + sudo rm -f /etc/httpd/conf.d/000-* + install_package httpd mod_wsgi + elif is_suse; then + install_package apache2 apache2-mod_wsgi + else + exit_distro_not_supported "apache installation" + fi + + # NOTE(sdague) quantal changed the name of the node binary + if is_ubuntu; then + if [[ ! -e "/usr/bin/node" ]]; then + install_package nodejs-legacy + fi + fi + + git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG +} + +# start_horizon() - Start running processes, including screen +function start_horizon() { + restart_service $APACHE_NAME + screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" +} + +# stop_horizon() - Stop running processes (non-screen) +function stop_horizon() { + if is_ubuntu; then + stop_service apache2 + elif is_fedora; then + stop_service httpd + elif is_suse; then + stop_service apache2 + else + exit_distro_not_supported "apache configuration" + fi +} + +# Restore xtrace +$XTRACE diff --git a/lib/keystone b/lib/keystone index ae890567..866c62e1 100644 --- a/lib/keystone +++ b/lib/keystone @@ -4,10 +4,10 @@ # Dependencies: # ``functions`` file # ``BASE_SQL_CONN`` -# ``SERVICE_HOST`` +# ``SERVICE_HOST``, ``SERVICE_PROTOCOL`` # ``SERVICE_TOKEN`` # ``S3_SERVICE_PORT`` (template backend only) - +# ``STACK_USER`` # ``stack.sh`` calls the entry points in this order: # @@ -15,6 +15,7 @@ # configure_keystone # init_keystone # start_keystone +# create_keystone_accounts # stop_keystone # cleanup_keystone @@ -26,8 +27,6 @@ set +o xtrace # Defaults # -------- -# - # Set up default directories KEYSTONE_DIR=$DEST/keystone KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} @@ -40,18 +39,24 @@ KEYSTONECLIENT_DIR=$DEST/python-keystoneclient KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates +# Select the backend for Tokens +KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} + # Select Keystone's token format # Choose from 'UUID' and 'PKI' KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI} # Set Keystone interface configuration 
-KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357} -KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http} +KEYSTONE_AUTH_PORT_INT=${KEYSTONE_AUTH_PORT_INT:-35358} +KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} + +# Public facing bits KEYSTONE_SERVICE_HOST=${KEYSTONE_SERVICE_HOST:-$SERVICE_HOST} KEYSTONE_SERVICE_PORT=${KEYSTONE_SERVICE_PORT:-5000} -KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-http} +KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001} +KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} # Entry Points @@ -77,8 +82,8 @@ function configure_keystone() { if [[ ! -d $KEYSTONE_CONF_DIR ]]; then sudo mkdir -p $KEYSTONE_CONF_DIR - sudo chown `whoami` $KEYSTONE_CONF_DIR fi + sudo chown $STACK_USER $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF @@ -88,6 +93,24 @@ function configure_keystone() { # Rewrite stock ``keystone.conf`` local dburl database_connection_url dburl keystone + + if is_service_enabled ldap; then + #Set all needed ldap values + iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD + iniset $KEYSTONE_CONF ldap user "dc=Manager,dc=openstack,dc=org" + iniset $KEYSTONE_CONF ldap suffix "dc=openstack,dc=org" + fi + + if [[ "$KEYSTONE_IDENTITY_BACKEND" == "ldap" ]]; then + iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.ldap.Identity" + fi + + if is_service_enabled tls-proxy; then + # Set the service ports for a proxy to take the originals + iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT + iniset $KEYSTONE_CONF DEFAULT admin_port $KEYSTONE_AUTH_PORT_INT + fi + iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT" iniset $KEYSTONE_CONF sql connection $dburl @@ -99,6 +122,12 @@ function configure_keystone() { # Append the S3 bits iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory" + if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then + iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token + else + iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token + fi + if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then # Configure ``keystone.conf`` to use sql iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog @@ -122,7 +151,7 @@ function configure_keystone() { echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG fi - sudo sed -e " + sed -e " s,%SERVICE_HOST%,$SERVICE_HOST,g; s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g; " -i $KEYSTONE_CATALOG @@ -144,6 +173,100 @@ function configure_keystone() { } +# create_keystone_accounts() - Sets up common required keystone accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service -- -- +# -- -- Member +# admin admin admin +# demo admin admin +# demo demo Member, anotherrole +# invisible_to_admin demo Member + +# Migrated from keystone_data.sh +create_keystone_accounts() { + + # admin + ADMIN_TENANT=$(keystone tenant-create \ + --name admin \ + | grep " id " | get_field 2) + ADMIN_USER=$(keystone user-create \ + --name admin \ + --pass "$ADMIN_PASSWORD" \ + --email admin@example.com \ + | grep " id " | get_field 2) + ADMIN_ROLE=$(keystone role-create \ + --name admin \ + | grep " id " | get_field 2) + keystone 
user-role-add \ + --user_id $ADMIN_USER \ + --role_id $ADMIN_ROLE \ + --tenant_id $ADMIN_TENANT + + # service + SERVICE_TENANT=$(keystone tenant-create \ + --name $SERVICE_TENANT_NAME \ + | grep " id " | get_field 2) + + # The Member role is used by Horizon and Swift so we need to keep it: + MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2) + # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used + # TODO(sleepsonthefloor): show how this can be used for rbac in the future! + ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2) + + # invisible tenant - admin can't see this one + INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2) + + # demo + DEMO_TENANT=$(keystone tenant-create \ + --name=demo \ + | grep " id " | get_field 2) + DEMO_USER=$(keystone user-create \ + --name demo \ + --pass "$ADMIN_PASSWORD" \ + --email demo@example.com \ + | grep " id " | get_field 2) + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT + keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT + + # Keystone + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + KEYSTONE_SERVICE=$(keystone service-create \ + --name keystone \ + --type identity \ + --description "Keystone Identity Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $KEYSTONE_SERVICE \ + --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" \ + --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \ + --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0" + fi + + # TODO(dtroyer): This is part of a series of changes...remove these when + # complete if they are really unused +# KEYSTONEADMIN_ROLE=$(keystone role-create \ +# --name KeystoneAdmin \ +# | grep " id " | get_field 2) +# KEYSTONESERVICE_ROLE=$(keystone role-create \ +# --name KeystoneServiceAdmin \ +# | grep " id " | get_field 2) + + # TODO(termie): these two might be dubious +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONEADMIN_ROLE \ +# --tenant_id $ADMIN_TENANT +# keystone user-role-add \ +# --user_id $ADMIN_USER \ +# --role_id $KEYSTONESERVICE_ROLE \ +# --tenant_id $ADMIN_TENANT +} + # init_keystone() - Initialize databases, etc. 
function init_keystone() { # (Re)create keystone database @@ -154,11 +277,13 @@ function init_keystone() { if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then # Set up certificates + rm -rf $KEYSTONE_CONF_DIR/ssl $KEYSTONE_DIR/bin/keystone-manage pki_setup # Create cache dir sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR - sudo chown `whoami` $KEYSTONE_AUTH_CACHE_DIR + sudo chown $STACK_USER $KEYSTONE_AUTH_CACHE_DIR + rm -f $KEYSTONE_AUTH_CACHE_DIR/* fi } @@ -169,13 +294,34 @@ function install_keystoneclient() { # install_keystone() - Collect source and prepare function install_keystone() { + # only install ldap if the service has been enabled + if is_service_enabled ldap; then + install_ldap + fi git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH } # start_keystone() - Start running processes, including screen function start_keystone() { + # Get right service port for testing + local service_port=$KEYSTONE_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$KEYSTONE_SERVICE_PORT_INT + fi + # Start Keystone in a screen window screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug" + echo "Waiting for keystone to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then + echo "keystone did not start" + exit 1 + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT & + start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT & + fi } # stop_keystone() - Stop running processes diff --git a/lib/ldap b/lib/ldap new file mode 100644 index 00000000..5cb45347 --- /dev/null +++ b/lib/ldap @@ -0,0 +1,74 @@ +# lib/ldap +# Functions to control the installation and configuration of **ldap** + +# ``stack.sh`` calls the entry points in this order: +# + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# install_ldap +# install_ldap() - Collect source and prepare +function install_ldap() { + echo "Installing LDAP inside function" + echo "LDAP_PASSWORD is $LDAP_PASSWORD" + echo "os_VENDOR is $os_VENDOR" + printf "installing" + if is_ubuntu; then + echo "os vendor is Ubuntu" + LDAP_OLCDB_NUMBER=1 + LDAP_ROOTPW_COMMAND=replace + sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils + #automatically starts LDAP on ubuntu so no need to call start_ldap + elif is_fedora; then + echo "os vendor is Fedora" + LDAP_OLCDB_NUMBER=2 + LDAP_ROOTPW_COMMAND=add + start_ldap + fi + + printf "generate password file" + SLAPPASS=`slappasswd -s $LDAP_PASSWORD` + + printf "secret is $SLAPPASS\n" + #create manager.ldif + TMP_MGR_DIFF_FILE=`mktemp -t manager_ldiff.$$.XXXXXXXXXX.ldif` + sed -e "s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|" -e "s|\${SLAPPASS}|$SLAPPASS|" -e "s|\${LDAP_ROOTPW_COMMAND}|$LDAP_ROOTPW_COMMAND|" $FILES/ldap/manager.ldif.in >> $TMP_MGR_DIFF_FILE + + #update ldap olcdb + sudo ldapmodify -Y EXTERNAL -H ldapi:/// -f $TMP_MGR_DIFF_FILE + + # add our top level ldap nodes + if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then + printf "LDAP already configured for OpenStack\n" + if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then + # clear LDAP state + clear_ldap_state + # reconfigure LDAP for OpenStack + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w 
$LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi + else + printf "Configuring LDAP for OpenStack\n" + ldapadd -c -x -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -w $LDAP_PASSWORD -f $FILES/ldap/openstack.ldif + fi +} + +# start_ldap() - Start LDAP +function start_ldap() { + sudo service slapd restart +} + + +# stop_ldap() - Stop LDAP +function stop_ldap() { + sudo service slapd stop +} + +# clear_ldap_state() - Clear LDAP State +function clear_ldap_state() { + ldapdelete -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -r "dc=openstack,dc=org" +} + +# Restore xtrace +$XTRACE diff --git a/lib/n-vol b/lib/n-vol deleted file mode 100644 index db53582b..00000000 --- a/lib/n-vol +++ /dev/null @@ -1,126 +0,0 @@ -# lib/n-vol -# Install and start Nova volume service - -# Dependencies: -# - functions -# - DATA_DIR must be defined -# - KEYSTONE_AUTH_* must be defined -# - NOVA_DIR, NOVA_BIN_DIR, NOVA_STATE_PATH must be defined -# SERVICE_{TENANT_NAME|PASSWORD} must be defined -# _configure_tgt_for_config_d() from lib/cinder - -# stack.sh -# --------- -# install_nvol -# configure_nvol -# init_nvol -# start_nvol -# stop_nvol -# cleanup_nvol - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Name of the LVM volume group to use/create for iscsi volumes -VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes} -VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-} - - -# cleanup_nvol() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_nvol() { - # kill instances (nova) - # delete image files (glance) - # This function intentionally left blank - : -} - -# configure_nvol() - Set config files, create data dirs, etc -function configure_nvol() { - # sudo python setup.py deploy - # iniset $XXX_CONF ... - # This function intentionally left blank - : -} - -# init_nvol() - Initialize databases, etc. -function init_nvol() { - # Configure a default volume group called '`stack-volumes`' for the volume - # service if it does not yet exist. If you don't wish to use a file backed - # volume group, create your own volume group called ``stack-volumes`` before - # invoking ``stack.sh``. - # - # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``. - - if ! sudo vgs $VOLUME_GROUP; then - VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file} - # Only create if the file doesn't already exists - [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE - DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE` - # Only create if the loopback device doesn't contain $VOLUME_GROUP - if ! 
sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi - fi - - mkdir -p $NOVA_STATE_PATH/volumes - - if sudo vgs $VOLUME_GROUP; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service - start_service tgtd - fi - - # Remove nova iscsi targets - sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - # Clean out existing volumes - for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do - # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want - if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then - sudo lvremove -f $VOLUME_GROUP/$lv - fi - done - fi -} - -# install_nvol() - Collect source and prepare -function install_nvol() { - # git clone xxx - # Install is handled when installing Nova - : -} - -# start_nvol() - Start running processes, including screen -function start_nvol() { - # Setup the tgt configuration file - if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then - _configure_tgt_for_config_d - sudo mkdir -p /etc/tgt/conf.d - echo "include $NOVA_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf - fi - - if [[ "$os_PACKAGE" = "deb" ]]; then - # tgt in oneiric doesn't restart properly if tgtd isn't running - # do it in two steps - sudo stop tgt || true - sudo start tgt - else - restart_service tgtd - fi - - screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume" -} - -# stop_nvol() - Stop running processes -function stop_nvol() { - # Kill the nova volume screen window - screen -S $SCREEN_NAME -p n-vol -X kill - - stop_service tgt -} - -# Restore xtrace -$XTRACE diff --git a/lib/nova b/lib/nova index 2c1413d3..849ec573 100644 --- a/lib/nova +++ b/lib/nova @@ -1,9 +1,9 @@ # lib/nova -# Functions to control the configuration and operation of the XXXX service +# Functions to control the configuration and operation of the **Nova** service # Dependencies: # ``functions`` file -# ``DEST``, ``DATA_DIR`` must be defined +# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # ``LIBVIRT_TYPE`` must be defined # ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined @@ -39,19 +39,21 @@ NOVA_CONF_DIR=/etc/nova NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} +# Public facing bits +NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} +NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} +NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} +NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + # Support entry points installation of console scripts if [[ -d $NOVA_DIR/bin ]]; then NOVA_BIN_DIR=$NOVA_DIR/bin else - NOVA_BIN_DIR=/usr/local/bin + NOVA_BIN_DIR=$(get_python_exec_prefix) fi # Set the paths of certain binaries -if [[ "$os_PACKAGE" = "deb" ]]; then - NOVA_ROOTWRAP=/usr/local/bin/nova-rootwrap -else - NOVA_ROOTWRAP=/usr/bin/nova-rootwrap -fi +NOVA_ROOTWRAP=$(get_rootwrap_location nova) # Allow rate limiting to be turned off for testing, like for Tempest # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting @@ -147,7 +149,7 @@ function configure_nova() { if [[ ! 
-d $NOVA_CONF_DIR ]]; then sudo mkdir -p $NOVA_CONF_DIR fi - sudo chown `whoami` $NOVA_CONF_DIR + sudo chown $STACK_USER $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR @@ -164,21 +166,16 @@ function configure_nova() { # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - # Rewrite the authtoken configuration for our Keystone service. - # This is a bit defensive to allow the sample file some variance. - sed -e " - /^admin_token/i admin_tenant_name = $SERVICE_TENANT_NAME - /admin_tenant_name/s/^.*$/admin_tenant_name = $SERVICE_TENANT_NAME/; - /admin_user/s/^.*$/admin_user = nova/; - /admin_password/s/^.*$/admin_password = $SERVICE_PASSWORD/; - s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g; - s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g; - " -i $NOVA_API_PASTE_INI + iniset $NOVA_API_PASTE_INI filter:authtoken auth_host $SERVICE_HOST + if is_service_enabled tls-proxy; then + iniset $NOVA_API_PASTE_INI filter:authtoken auth_protocol $SERVICE_PROTOCOL + fi + iniset $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $NOVA_API_PASTE_INI filter:authtoken admin_user nova + iniset $NOVA_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR - fi + iniset $NOVA_API_PASTE_INI filter:authtoken signing_dir $NOVA_AUTH_CACHE_DIR if is_service_enabled n-cpu; then # Force IP forwarding on, just in case @@ -206,7 +203,7 @@ function configure_nova() { # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then if [[ ! "$DISTRO" > natty ]]; then cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0" sudo mkdir -p /cgroup @@ -220,7 +217,12 @@ function configure_nova() { fi fi - if is_service_enabled quantum && is_quantum_ovs_base_plugin "$Q_PLUGIN" && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then + # Prepare directories and packages for baremetal driver + if is_baremetal; then + configure_baremetal_nova_dirs + fi + + if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces cat </dev/null; then - sudo groupadd libvirtd - fi - sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla + LIBVIRT_DAEMON=libvirtd + fi + + + + if is_fedora; then + # Starting with Fedora 18, enable the stack user to run 'virsh -c qemu:///system' + # by creating a policy-kit rule for the stack user + if [[ "$os_RELEASE" -ge "18" ]]; then + rules_dir=/etc/polkit-1/rules.d + sudo mkdir -p $rules_dir + sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules +polkit.addRule(function(action, subject) { + if (action.id == 'org.libvirt.unix.manage' && + subject.user == '"$STACK_USER"') { + return polkit.Result.YES; + } +}); +EOF" + unset rules_dir + else + sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla [libvirt Management Access] Identity=unix-group:libvirtd Action=org.libvirt.unix.manage @@ -247,12 +266,26 @@ ResultAny=yes ResultInactive=yes ResultActive=yes EOF' - LIBVIRT_DAEMON=libvirtd + fi + elif is_suse; then + # Work around the fact that polkit-default-privs overrules pklas + # with 'unix-group:$group'.
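+ # Grant org.libvirt.unix.manage to the user directly (note the + # unix-user:$USER identity below) rather than through the libvirtd group.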
+ sudo bash -c "cat </etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla +[libvirt Management Access] +Identity=unix-user:$USER +Action=org.libvirt.unix.manage +ResultAny=yes +ResultInactive=yes +ResultActive=yes +EOF" fi # The user that nova runs as needs to be member of **libvirtd** group otherwise # nova-compute will be unable to use libvirt. - sudo usermod -a -G libvirtd `whoami` + if ! getent group libvirtd >/dev/null; then + sudo groupadd libvirtd + fi + add_user_to_group $STACK_USER libvirtd # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart @@ -272,7 +305,7 @@ EOF' if [ -L /dev/disk/by-label/nova-instances ]; then if ! mount -n | grep -q $NOVA_INSTANCES_PATH; then sudo mount -L nova-instances $NOVA_INSTANCES_PATH - sudo chown -R `whoami` $NOVA_INSTANCES_PATH + sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH fi fi @@ -281,6 +314,46 @@ EOF' fi } +# create_nova_accounts() - Set up common required nova accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service nova admin, [ResellerAdmin (swift only)] + +# Migrated from keystone_data.sh +create_nova_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + # Nova + if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then + NOVA_USER=$(keystone user-create \ + --name=nova \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + --email=nova@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $NOVA_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + NOVA_SERVICE=$(keystone service-create \ + --name=nova \ + --type=compute \ + --description="Nova Compute Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $NOVA_SERVICE \ + --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ + --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" + fi + fi +} + # create_nova_conf() - Create a new nova.conf file function create_nova_conf() { # Remove legacy ``nova.conf`` @@ -289,73 +362,74 @@ function create_nova_conf() { # (Re)create ``nova.conf`` rm -f $NOVA_CONF add_nova_opt "[DEFAULT]" - add_nova_opt "verbose=True" - add_nova_opt "auth_strategy=keystone" - add_nova_opt "allow_resize_to_same_host=True" - add_nova_opt "api_paste_config=$NOVA_API_PASTE_INI" - add_nova_opt "rootwrap_config=$NOVA_CONF_DIR/rootwrap.conf" - add_nova_opt "compute_scheduler_driver=$SCHEDULER" - add_nova_opt "dhcpbridge_flagfile=$NOVA_CONF" - add_nova_opt "force_dhcp_release=True" - add_nova_opt "fixed_range=$FIXED_RANGE" - add_nova_opt "s3_host=$SERVICE_HOST" - add_nova_opt "s3_port=$S3_SERVICE_PORT" - add_nova_opt "osapi_compute_extension=nova.api.openstack.compute.contrib.standard_extensions" - add_nova_opt "my_ip=$HOST_IP" + iniset $NOVA_CONF DEFAULT verbose "True" + iniset $NOVA_CONF DEFAULT debug "True" + iniset $NOVA_CONF DEFAULT auth_strategy "keystone" + iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" + iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" + iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" + iniset 
$NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER" + iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF" + iniset $NOVA_CONF DEFAULT force_dhcp_release "True" + iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE" + iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" + iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" + iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" + iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions" + iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" local dburl database_connection_url dburl nova - add_nova_opt "sql_connection=$dburl" - add_nova_opt "libvirt_type=$LIBVIRT_TYPE" - add_nova_opt "libvirt_cpu_mode=none" - add_nova_opt "instance_name_template=${INSTANCE_NAME_PREFIX}%08x" + iniset $NOVA_CONF DEFAULT sql_connection "$dburl" + if is_baremetal; then + database_connection_url dburl nova_bm + iniset $NOVA_CONF baremetal sql_connection $dburl + fi + iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE" + iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none" + iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" if is_service_enabled n-api; then - add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS" - fi - if is_service_enabled n-vol; then - NOVA_ENABLED_APIS="${NOVA_ENABLED_APIS},osapi_volume" - iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS - add_nova_opt "volume_api_class=nova.volume.api.API" - add_nova_opt "volume_group=$VOLUME_GROUP" - add_nova_opt "volume_name_template=${VOLUME_NAME_PREFIX}%s" - # oneiric no longer supports ietadm - add_nova_opt "iscsi_helper=tgtadm" + iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS" + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" + fi fi if is_service_enabled cinder; then - add_nova_opt "volume_api_class=nova.volume.cinder.API" + iniset $NOVA_CONF DEFAULT volume_api_class "nova.volume.cinder.API" fi if [ -n "$NOVA_STATE_PATH" ]; then - add_nova_opt "state_path=$NOVA_STATE_PATH" - add_nova_opt "lock_path=$NOVA_STATE_PATH" + iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" + iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH" fi if [ -n "$NOVA_INSTANCES_PATH" ]; then - add_nova_opt "instances_path=$NOVA_INSTANCES_PATH" + iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH" fi if [ "$MULTI_HOST" != "False" ]; then - add_nova_opt "multi_host=True" - add_nova_opt "send_arp_for_ha=True" + iniset $NOVA_CONF DEFAULT multi_host "True" + iniset $NOVA_CONF DEFAULT send_arp_for_ha "True" fi if [ "$SYSLOG" != "False" ]; then - add_nova_opt "use_syslog=True" + iniset $NOVA_CONF DEFAULT use_syslog "True" fi if [ "$API_RATE_LIMIT" != "True" ]; then - add_nova_opt "api_rate_limit=False" + iniset $NOVA_CONF DEFAULT api_rate_limit "False" fi if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then # Add color to logging output - add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" - add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" - add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s %(instance)s" + iniset $NOVA_CONF DEFAULT 
logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d" + iniset $NOVA_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s" else # Show user_name and project_name instead of user_id and project_id - add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi if is_service_enabled ceilometer; then - add_nova_opt "instance_usage_audit=True" - add_nova_opt "instance_usage_audit_period=hour" - add_nova_opt "notification_driver=nova.openstack.common.notifier.rabbit_notifier" - add_nova_opt "notification_driver=ceilometer.compute.nova_notifier" + iniset $NOVA_CONF DEFAULT instance_usage_audit "True" + iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" + iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" + iniset $NOVA_CONF DEFAULT notification_driver "ceilometer.compute.nova_notifier" fi @@ -367,35 +441,59 @@ function create_nova_conf() { # Define extra nova conf flags by defining the array ``EXTRA_OPTS``. # For Example: ``EXTRA_OPTS=(foo=true bar=2)`` for I in "${EXTRA_OPTS[@]}"; do - # Attempt to convert flags to options - add_nova_opt ${I//--} + # Replace the first '=' with ' ' for iniset syntax + iniset $NOVA_CONF DEFAULT ${I/=/ } done } -# init_nova() - Initialize databases, etc. -function init_nova() { - # Nova Database - # ------------- +# create_nova_cache_dir() - Part of the init_nova() process +function create_nova_cache_dir() { + # Create cache dir + sudo mkdir -p $NOVA_AUTH_CACHE_DIR + sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR + rm -f $NOVA_AUTH_CACHE_DIR/* +} - # All nova components talk to a central database. We will need to do this step - # only once for an entire cluster. +function create_nova_conf_nova_network() { + iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NET_MAN" + iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE" + iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE" + iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE" + if [ -n "$FLAT_INTERFACE" ]; then + iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE" + fi +} + +# create_nova_keys_dir() - Part of the init_nova() process +function create_nova_keys_dir() { + # Create keys dir + sudo mkdir -p ${NOVA_STATE_PATH}/keys + sudo chown -R $STACK_USER ${NOVA_STATE_PATH} +} - if is_service_enabled $DATABASE_BACKENDS && is_service_enabled nova; then +# init_nova() - Initialize databases, etc. +function init_nova() { + # All nova components talk to a central database. + # Only do this step once on the API node for an entire cluster. 
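+ # (Gating on n-api below lets compute-only nodes in a multi-host + # cluster skip the database creation and schema migration.)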
+ if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then # (Re)create nova database # Explicitly use latin1: to avoid lp#829209, nova expects the database to # use latin1 by default, and then upgrades the database to utf8 (see the # 082_essex.py in nova) recreate_database nova latin1 - # (Re)create nova database + # Migrate nova database $NOVA_BIN_DIR/nova-manage db sync - fi - if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then - # Create cache dir - sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown `whoami` $NOVA_AUTH_CACHE_DIR + # (Re)create nova baremetal database + if is_baremetal; then + recreate_database nova_bm latin1 + $NOVA_BIN_DIR/nova-baremetal-manage db sync + fi fi + + create_nova_cache_dir + create_nova_keys_dir } # install_novaclient() - Collect source and prepare @@ -406,17 +504,19 @@ function install_novaclient() { # install_nova() - Collect source and prepare function install_nova() { if is_service_enabled n-cpu; then - if [[ "$os_PACKAGE" = "deb" ]]; then - LIBVIRT_PKG_NAME=libvirt-bin + if is_ubuntu; then + install_package libvirt-bin + elif is_fedora || is_suse; then + install_package libvirt else - LIBVIRT_PKG_NAME=libvirt + exit_distro_not_supported "libvirt installation" fi - install_package $LIBVIRT_PKG_NAME + # Install and configure **LXC** if specified. LXC is another approach to # splitting a system into many smaller parts. LXC uses cgroups and chroot # to simulate multiple systems. if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then - if [[ "$os_PACKAGE" = "deb" ]]; then + if is_ubuntu; then if [[ "$DISTRO" > natty ]]; then install_package cgroup-lite fi @@ -431,24 +531,47 @@ function install_nova() { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH } +# start_nova_api() - Start the API process ahead of other things +function start_nova_api() { + # Get right service port for testing + local service_port=$NOVA_SERVICE_PORT + if is_service_enabled tls-proxy; then + service_port=$NOVA_SERVICE_PORT_INT + fi + + screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" + echo "Waiting for nova-api to start..." + if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then + echo "nova-api did not start" + exit 1 + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & + fi +} + # start_nova() - Start running processes, including screen function start_nova() { # The group **libvirtd** is added to the current user in this script. # Use 'sg' to execute nova-compute as a member of the **libvirtd** group. # ``screen_it`` checks ``is_service_enabled``, it is not needed here + screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor" screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute" screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert" screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network" screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler" - screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ." 
- screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF" - screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth" + screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR" + screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF" + screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR" + screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth" } # stop_nova() - Stop running processes (non-screen) function stop_nova() { # Kill the nova screen windows - for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth; do + for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do screen -S $SCREEN_NAME -p $serv -X kill done } diff --git a/lib/quantum b/lib/quantum index f9e17825..61a5218e 100644 --- a/lib/quantum +++ b/lib/quantum @@ -1,12 +1,573 @@ # lib/quantum # functions - functions specific to quantum +# Dependencies: +# ``functions`` file +# ``DEST`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# install_quantum +# install_quantumclient +# install_quantum_agent_packages +# install_quantum_third_party +# setup_quantum +# setup_quantumclient +# configure_quantum +# init_quantum +# configure_quantum_third_party +# init_quantum_third_party +# start_quantum_third_party +# create_nova_conf_quantum +# start_quantum_service_and_check +# create_quantum_initial_network +# setup_quantum_debug +# start_quantum_agents +# +# ``unstack.sh`` calls the entry points in this order: +# +# stop_quantum + +# Functions in lib/quantum are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - quantum exercises +# - 3rd party programs + + +# Quantum Networking +# ------------------ + +# Make sure that quantum is enabled in ``ENABLED_SERVICES``. If you want +# to run Quantum on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# If you're planning to use the Quantum openvswitch plugin, set +# ``Q_PLUGIN`` to "openvswitch" and make sure the q-agt service is enabled +# in ``ENABLED_SERVICES``. If you're planning to use the Quantum +# linuxbridge plugin, set ``Q_PLUGIN`` to "linuxbridge" and make sure the +# q-agt service is enabled in ``ENABLED_SERVICES``. +# +# See "Quantum Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Quantum. +# +# With Quantum networking the NET_MAN variable is ignored.
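# A hypothetical single-host localrc fragment for the openvswitch plugin
# (the service names are the ones used throughout this file; the values
# below are examples only):
#
#   disable_service n-net
#   enable_service quantum q-svc q-agt q-dhcp q-l3 q-meta
#   Q_PLUGIN=openvswitch
#
# For VLAN connectivity across hosts, the provider variables described
# in the next section would also be set, e.g.:
#
#   ENABLE_TENANT_VLANS=True
#   TENANT_VLAN_RANGE=1000:1999
#   PHYSICAL_NETWORK=default
#   OVS_PHYSICAL_BRIDGE=br-eth1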
+ + # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace + +# Quantum Network Configuration +# ----------------------------- + +# Set up default directories +QUANTUM_DIR=$DEST/quantum +QUANTUMCLIENT_DIR=$DEST/python-quantumclient +QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum} + +QUANTUM_CONF_DIR=/etc/quantum +QUANTUM_CONF=$QUANTUM_CONF_DIR/quantum.conf +export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"$QUANTUM_CONF_DIR/debug.ini"} + +# Default Quantum Plugin +Q_PLUGIN=${Q_PLUGIN:-openvswitch} +# Default Quantum Port +Q_PORT=${Q_PORT:-9696} +# Default Quantum Host +Q_HOST=${Q_HOST:-$HOST_IP} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# Use namespace or not +Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False} +# Use quantum-debug command +Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} +# The name of the default q-l3 router +Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} + +if is_service_enabled quantum; then + Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" + else + QUANTUM_ROOTWRAP=$(get_rootwrap_location quantum) + Q_RR_COMMAND="sudo $QUANTUM_ROOTWRAP $Q_RR_CONF_FILE" + fi + + # Provider Network Configurations + # -------------------------------- + + # The following variables control the Quantum openvswitch and + # linuxbridge plugins' allocation of tenant networks and + # availability of provider networks. If these are not configured + # in localrc, tenant networks will be local to the host (with no + # remote connectivity), and no physical resources will be + # available for the allocation of provider networks. + + # To use GRE tunnels for tenant networks, set to True in + # localrc. GRE tunnels are only supported by the openvswitch + # plugin, and currently only on Ubuntu. + ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} + + # If using GRE tunnels for tenant networks, specify the range of + # tunnel IDs from which tenant networks are allocated. Can be + # overridden in localrc if necessary. + TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} + + # To use VLANs for tenant networks, set to True in localrc. VLANs + # are supported by the openvswitch and linuxbridge plugins, each + # requiring additional configuration described below. + ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} + + # If using VLANs for tenant networks, set in localrc to specify + # the range of VLAN VIDs from which tenant networks are + # allocated. An external network switch must be configured to + # trunk these VLANs between hosts for multi-host connectivity. + # + # Example: ``TENANT_VLAN_RANGE=1000:1999`` + TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} + + # If using VLANs for tenant networks, or if using flat or VLAN + # provider networks, set in localrc to the name of the physical + # network, and also configure OVS_PHYSICAL_BRIDGE for the + # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge + # agent, as described below.
+ # + # Example: ``PHYSICAL_NETWORK=default`` + PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} + + # With the openvswitch plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the OVS bridge to use for the physical network. The + # bridge will be created if it does not already exist, but a + # physical interface must be manually added to the bridge as a + # port for external connectivity. + # + # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` + OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + + # With the linuxbridge plugin, if using VLANs for tenant networks, + # or if using flat or VLAN provider networks, set in localrc to + # the name of the network interface to use for the physical + # network. + # + # Example: ``LB_PHYSICAL_INTERFACE=eth1`` + LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + + # With the openvswitch plugin, set to True in localrc to enable + # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. + # + # Example: ``OVS_ENABLE_TUNNELING=True`` + OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} +fi + +# Quantum plugin specific functions +# --------------------------------- +# Please refer to lib/quantum_plugins/README.md for details. +source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN + +# Entry Points +# ------------ + +# configure_quantum() +# Set common config for all quantum server and agents. +function configure_quantum() { + _configure_quantum_common + iniset_rpc_backend quantum $QUANTUM_CONF DEFAULT + + if is_service_enabled q-svc; then + _configure_quantum_service + fi + if is_service_enabled q-agt; then + _configure_quantum_plugin_agent + fi + if is_service_enabled q-dhcp; then + _configure_quantum_dhcp_agent + fi + if is_service_enabled q-l3; then + _configure_quantum_l3_agent + fi + if is_service_enabled q-meta; then + _configure_quantum_metadata_agent + fi + + _configure_quantum_debug_command + + _cleanup_quantum +} + +function create_nova_conf_quantum() { + iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API" + iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY" + iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT" + + # set NOVA_VIF_DRIVER and optionally set options in nova_conf + quantum_plugin_create_nova_conf + + iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER" + iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" + if is_service_enabled q-meta; then + iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True" + fi +} + +# create_quantum_accounts() - Set up common required quantum accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service quantum admin # if enabled + +# Migrated from keystone_data.sh +function create_quantum_accounts() { + + SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }") + ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }") + + if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + QUANTUM_USER=$(keystone user-create \ + --name=quantum \ + --pass="$SERVICE_PASSWORD" \ + --tenant_id $SERVICE_TENANT \ + 
--email=quantum@example.com \ + | grep " id " | get_field 2) + keystone user-role-add \ + --tenant_id $SERVICE_TENANT \ + --user_id $QUANTUM_USER \ + --role_id $ADMIN_ROLE + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + QUANTUM_SERVICE=$(keystone service-create \ + --name=quantum \ + --type=network \ + --description="Quantum Service" \ + | grep " id " | get_field 2) + keystone endpoint-create \ + --region RegionOne \ + --service_id $QUANTUM_SERVICE \ + --publicurl "http://$SERVICE_HOST:9696/" \ + --adminurl "http://$SERVICE_HOST:9696/" \ + --internalurl "http://$SERVICE_HOST:9696/" + fi + fi +} + +function create_quantum_initial_network() { + TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) + + # Create a small network + # Since quantum command is executed in admin context at this point, + # ``--tenant_id`` needs to be specified. + if is_baremetal; then + sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + NET_ID=$(quantum net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + sudo ifconfig $OVS_PHYSICAL_BRIDGE up + else + NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + fi + + if is_service_enabled q-l3; then + # Create a router, and add the private subnet as one of its interfaces + if [[ "$Q_USE_NAMESPACE" == "True" ]]; then + # If namespaces are enabled, create a tenant-owned router. + ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + else + # If namespaces are disabled, the L3 agent can only target + # a single router, which should not be tenant-owned. + ROUTER_ID=$(quantum router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + fi + quantum router-interface-add $ROUTER_ID $SUBNET_ID + # Create an external network, and a subnet. Configure the external network as router gw + EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) + quantum router-gateway-set $ROUTER_ID $EXT_NET_ID + + if is_quantum_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + CIDR_LEN=${FLOATING_RANGE#*/} + sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE + sudo ip link set $PUBLIC_BRIDGE up + ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` + sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP + fi + if [[ "$Q_USE_NAMESPACE" == "False" ]]; then + # Explicitly set router id in l3 agent configuration + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi + fi +} + +# init_quantum() - Initialize databases, etc. 
+function init_quantum() { + : +} + +# install_quantum() - Collect source and prepare +function install_quantum() { + git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH +} + +# install_quantumclient() - Collect source and prepare +function install_quantumclient() { + git_clone $QUANTUMCLIENT_REPO $QUANTUMCLIENT_DIR $QUANTUMCLIENT_BRANCH +} + +# install_quantum_agent_packages() - Install packages specific to the plugin agent +function install_quantum_agent_packages() { + # install packages that are specific to the plugin agent + quantum_plugin_install_agent_packages +} + +function setup_quantum() { + setup_develop $QUANTUM_DIR +} + +function setup_quantumclient() { + setup_develop $QUANTUMCLIENT_DIR +} + +# Start running processes, including screen +function start_quantum_service_and_check() { + # Start the Quantum service + screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + echo "Waiting for Quantum to start..." + if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then + echo "Quantum did not start" + exit 1 + fi +} + +# Start running processes, including screen +function start_quantum_agents() { + # Start up the quantum agents if enabled + screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE" + screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE" + screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE" + screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE" +} + +# stop_quantum() - Stop running processes (non-screen) +function stop_quantum() { + if is_service_enabled q-dhcp; then + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! -z "$pid" ] && sudo kill -9 $pid + fi +} + +# _cleanup_quantum() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function _cleanup_quantum() { + : +} + +# _configure_quantum_common() +# Set common config for all quantum server and agents. +# This MUST be called before other _configure_quantum_* functions. +function _configure_quantum_common() { + # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find + if [[ ! -d $QUANTUM_CONF_DIR ]]; then + sudo mkdir -p $QUANTUM_CONF_DIR + fi + sudo chown $STACK_USER $QUANTUM_CONF_DIR + + cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF + + # set plugin-specific variables + # Q_PLUGIN_CONF_PATH, Q_PLUGIN_CONF_FILENAME, Q_DB_NAME, Q_PLUGIN_CLASS + quantum_plugin_configure_common + + if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then + echo "Quantum plugin not set..
exiting" + exit 1 + fi + + # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + + database_connection_url dburl $Q_DB_NAME + iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl + unset dburl + + _quantum_setup_rootwrap +} + +function _configure_quantum_debug_command() { + if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then + return + fi + + cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE + + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND" + # Intermediate fix until Quantum patch lands and then line above will + # be cleaned. + iniset $QUANTUM_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE + + quantum_plugin_configure_debug_command +} + +function _configure_quantum_dhcp_agent() { + AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" + Q_DHCP_CONF_FILE=$QUANTUM_CONF_DIR/dhcp_agent.ini + + cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT verbose True + iniset $Q_DHCP_CONF_FILE DEFAULT debug True + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $Q_DHCP_CONF_FILE + + quantum_plugin_configure_dhcp_agent +} + +function _configure_quantum_l3_agent() { + AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" + PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} + Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini + + cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + + iniset $Q_L3_CONF_FILE DEFAULT verbose True + iniset $Q_L3_CONF_FILE DEFAULT debug True + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url + _quantum_setup_interface_driver $Q_L3_CONF_FILE + + quantum_plugin_configure_l3_agent +} + +function _configure_quantum_metadata_agent() { + AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent" + Q_META_CONF_FILE=$QUANTUM_CONF_DIR/metadata_agent.ini + + cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT verbose True + iniset $Q_META_CONF_FILE DEFAULT debug True + iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + + _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url +} + +# _configure_quantum_plugin_agent() - Set config files for quantum plugin agent +# It is called when q-agt is enabled. 
+function _configure_quantum_plugin_agent() { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" + + # Configure agent for plugin + quantum_plugin_configure_plugin_agent +} + +# _configure_quantum_service() - Set config files for quantum service +# It is called when q-svc is enabled. +function _configure_quantum_service() { + Q_API_PASTE_FILE=$QUANTUM_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$QUANTUM_CONF_DIR/policy.json + + cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE + + if is_service_enabled $DATABASE_BACKENDS; then + recreate_database $Q_DB_NAME utf8 + else + echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." + exit 1 + fi + + # Update either configuration file with plugin + iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + iniset $QUANTUM_CONF DEFAULT verbose True + iniset $QUANTUM_CONF DEFAULT debug True + iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP + + iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + _quantum_setup_keystone $QUANTUM_CONF keystone_authtoken + # Comment out keystone authtoken configuration in api-paste.ini + # It is required to avoid any breakage in Quantum where the sample + # api-paste.ini has authtoken configurations. + _quantum_commentout_keystone_authtoken $Q_API_PASTE_FILE filter:authtoken + + # Configure plugin + quantum_plugin_configure_service +} + +# Utility Functions +#------------------ + +# _quantum_setup_rootwrap() - configure Quantum's rootwrap +function _quantum_setup_rootwrap() { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Deploy new rootwrap filters files (owned by root). 
+ # Wipe any existing rootwrap.d files first + Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + # Deploy filters to $QUANTUM_CONF_DIR/rootwrap.d + mkdir -p -m 755 $Q_CONF_ROOTWRAP_D + cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ + sudo chown -R root:root $Q_CONF_ROOTWRAP_D + sudo chmod 644 $Q_CONF_ROOTWRAP_D/* + # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d + sudo cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + sudo chown root:root $Q_RR_CONF_FILE + sudo chmod 0644 $Q_RR_CONF_FILE + # Specify rootwrap.conf as first parameter to quantum-rootwrap + ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *" + + # Set up the rootwrap sudoers for quantum + TEMPFILE=`mktemp` + echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap + + # Update the root_helper + iniset $QUANTUM_CONF AGENT root_helper "$Q_RR_COMMAND" +} + # Configures keystone integration for quantum service and agents -function quantum_setup_keystone() { +function _quantum_setup_keystone() { local conf_file=$1 local section=$2 local use_auth_url=$3 @@ -20,42 +581,147 @@ function quantum_setup_keystone() { iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME iniset $conf_file $section admin_user $Q_ADMIN_USERNAME iniset $conf_file $section admin_password $SERVICE_PASSWORD + iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR + # Create cache dir + sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR + sudo chown $STACK_USER $QUANTUM_AUTH_CACHE_DIR + rm -f $QUANTUM_AUTH_CACHE_DIR/* } -function quantum_setup_ovs_bridge() { - local bridge=$1 - for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do - if [[ "$PORT" =~ tap* ]]; then echo `sudo ip link delete $PORT` > /dev/null; fi - sudo ovs-vsctl --no-wait del-port $bridge $PORT - done - sudo ovs-vsctl --no-wait -- --if-exists del-br $bridge - sudo ovs-vsctl --no-wait add-br $bridge - sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge -} - -function quantum_setup_external_bridge() { - local bridge=$1 - # Create it if it does not exist - sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge - # remove internal ports - for PORT in `sudo ovs-vsctl --no-wait list-ports $bridge`; do - TYPE=$(sudo ovs-vsctl get interface $PORT type) - if [[ "$TYPE" == "internal" ]]; then - echo `sudo ip link delete $PORT` > /dev/null - sudo ovs-vsctl --no-wait del-port $bridge $PORT +function _quantum_commentout_keystone_authtoken() { + local conf_file=$1 + local section=$2 + + inicomment $conf_file $section auth_host + inicomment $conf_file $section auth_port + inicomment $conf_file $section auth_protocol + inicomment $conf_file $section auth_url + + inicomment $conf_file $section admin_tenant_name + inicomment $conf_file $section admin_user + inicomment $conf_file $section admin_password + inicomment $conf_file $section signing_dir +} + +function _quantum_setup_interface_driver() { + quantum_plugin_setup_interface_driver $1 +} + +# Functions for Quantum Exercises +#-------------------------------- + +function delete_probe() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + 
quantum-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function setup_quantum_debug() { + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $public_net_id + private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` + quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create $private_net_id + fi +} + +function teardown_quantum_debug() { + delete_probe $PUBLIC_NETWORK_NAME + delete_probe $PRIVATE_NETWORK_NAME +} + +function _get_net_id() { + quantum --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' +} + +function _get_probe_cmd_prefix() { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` + echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" +} + +function _ping_check_quantum() { + local from_net=$1 + local ip=$2 + local timeout_sec=$3 + local expected=${4:-"True"} + local check_command="" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if [[ "$expected" = "True" ]]; then + check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" + else + check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" + fi + if ! timeout $timeout_sec sh -c "$check_command"; then + if [[ "$expected" = "True" ]]; then + echo "[Fail] Couldn't ping server" + else + echo "[Fail] Could ping server" fi - done - # ensure no IP is configured on the public bridge - sudo ip addr flush dev $bridge + exit 1 + fi } -function is_quantum_ovs_base_plugin() { - local plugin=$1 - if [[ ",openvswitch,ryu," =~ ,${plugin}, ]]; then - return 0 +# ssh check +function _ssh_check_quantum() { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd="" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then + echo "server didn't become ssh-able!" + exit 1 fi - return 1 } +# Quantum 3rd party programs +#--------------------------- +# please refer to lib/quantum_thirdparty/README.md for details +QUANTUM_THIRD_PARTIES="" +for f in $TOP_DIR/lib/quantum_thirdparty/*; do + third_party=$(basename $f) + if is_service_enabled $third_party; then + source $TOP_DIR/lib/quantum_thirdparty/$third_party + QUANTUM_THIRD_PARTIES="$QUANTUM_THIRD_PARTIES,$third_party" + fi +done + +function _quantum_third_party_do() { + for third_party in ${QUANTUM_THIRD_PARTIES//,/ }; do + ${1}_${third_party} + done +} + +# configure_quantum_third_party() - Set config files, create data dirs, etc +function configure_quantum_third_party() { + _quantum_third_party_do configure +} + +# init_quantum_third_party() - Initialize databases, etc.
+function init_quantum_third_party() { + _quantum_third_party_do init +} + +# install_quantum_third_party() - Collect source and prepare +function install_quantum_third_party() { + _quantum_third_party_do install +} + +# start_quantum_third_party() - Start running processes, including screen +function start_quantum_third_party() { + _quantum_third_party_do start +} + +# stop_quantum_third_party - Stop running processes (non-screen) +function stop_quantum_third_party() { + _quantum_third_party_do stop +} + + # Restore xtrace +$XTRACE diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md new file mode 100644 index 00000000..a66d35a3 --- /dev/null +++ b/lib/quantum_plugins/README.md @@ -0,0 +1,34 @@ +Quantum plugin specific files +============================= +Quantum plugins require plugin-specific behavior. +The files under ``lib/quantum_plugins/`` are used +when the corresponding service is enabled. +Each plugin has ``lib/quantum_plugins/$Q_PLUGIN`` and defines the following +functions. + +* filename: ``$Q_PLUGIN`` + * The corresponding file name MUST be the same as the plugin name ``$Q_PLUGIN``. Plugin-specific configuration variables should be in this file. + +functions +--------- +``lib/quantum`` calls the following functions when ``$Q_PLUGIN`` is enabled + +* ``quantum_plugin_create_nova_conf`` : + set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf + e.g. + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} +* ``quantum_plugin_install_agent_packages`` : + install packages that are specific to the plugin agent + e.g. + install_package bridge-utils +* ``quantum_plugin_configure_common`` : + set plugin-specific variables, ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``, + ``Q_DB_NAME``, ``Q_PLUGIN_CLASS`` +* ``quantum_plugin_configure_debug_command`` +* ``quantum_plugin_configure_dhcp_agent`` +* ``quantum_plugin_configure_l3_agent`` +* ``quantum_plugin_configure_plugin_agent`` +* ``quantum_plugin_configure_service`` +* ``quantum_plugin_setup_interface_driver`` diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight new file mode 100644 index 00000000..2c928bec --- /dev/null +++ b/lib/quantum_plugins/bigswitch_floodlight @@ -0,0 +1,55 @@ +# Quantum Big Switch/FloodLight plugin +# ------------------------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/quantum_plugins/ovs_base +source $TOP_DIR/lib/quantum_thirdparty/bigswitch_floodlight # for third party service specific configuration values + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} +} + +function quantum_plugin_install_agent_packages() { + _quantum_ovs_base_install_agent_packages +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/bigswitch + Q_PLUGIN_CONF_FILENAME=restproxy.ini + Q_DB_NAME="restproxy_quantum" + Q_PLUGIN_CLASS="quantum.plugins.bigswitch.plugin.QuantumRestProxyV2" + BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} + BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10} +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + _quantum_ovs_base_configure_l3_agent +} + +function
quantum_plugin_configure_plugin_agent() { + : +} + +function quantum_plugin_configure_service() { + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servers $BS_FL_CONTROLLERS_PORT + iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servertimeout $BS_FL_CONTROLLER_TIMEOUT +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade new file mode 100644 index 00000000..c372c19f --- /dev/null +++ b/lib/quantum_plugins/brocade @@ -0,0 +1,49 @@ +# Brocade Quantum Plugin +# ---------------------- + +# Save trace setting +BRCD_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + return 1 +} + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} +} + +function quantum_plugin_install_agent_packages() { + install_package bridge-utils +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/brocade + Q_PLUGIN_CONF_FILENAME=brocade.ini + Q_DB_NAME="brcd_quantum" + Q_PLUGIN_CLASS="quantum.plugins.brocade.QuantumPlugin.BrocadePluginV2" +} + +function quantum_plugin_configure_debug_command() { + : +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + : +} + +function quantum_plugin_configure_plugin_agent() { + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver +} + +# Restore xtrace +$BRCD_XTRACE diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge new file mode 100644 index 00000000..6d5d4e08 --- /dev/null +++ b/lib/quantum_plugins/linuxbridge @@ -0,0 +1,79 @@ +# Quantum Linux Bridge plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + # linuxbridge doesn't use OVS + return 1 +} + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"} +} + +function quantum_plugin_install_agent_packages() { + install_package bridge-utils +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge + Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini + Q_DB_NAME="quantum_linux_bridge" + Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" +} + +function quantum_plugin_configure_debug_command() { + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge +} + +function quantum_plugin_configure_plugin_agent() { + # Setup physical network interface mappings. Override + # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. 
+ if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then + LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE + fi + if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" +} + +function quantum_plugin_configure_service() { + if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan + else + echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. + if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + LB_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$LB_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES + fi +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch new file mode 100644 index 00000000..181e7e71 --- /dev/null +++ b/lib/quantum_plugins/openvswitch @@ -0,0 +1,144 @@ +# Quantum Open vSwtich plugin +# --------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/quantum_plugins/ovs_base + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver + iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE + fi +} + +function quantum_plugin_install_agent_packages() { + _quantum_ovs_base_install_agent_packages +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch + Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command +} + +function quantum_plugin_configure_dhcp_agent() { + : +} + +function quantum_plugin_configure_l3_agent() { + _quantum_ovs_base_configure_l3_agent +} + +function quantum_plugin_configure_plugin_agent() { + # Setup integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_ovs_base_setup_bridge $OVS_BRIDGE + + # Setup agent for tunneling + if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then + # Verify tunnels are supported + # REVISIT - also check kernel module support for GRE and patch ports + OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` + if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then + echo "You are running OVS version $OVS_VERSION." + echo "OVS 1.4+ is required for tunneling between multiple hosts." + exit 1 + fi + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP + fi + + # Setup physical network bridge mappings. 
Override + # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more + # complex physical network configurations. + if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then + OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE + + # Configure bridge manually with physical interface as port for multi-node + sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + fi + if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS + fi + AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # Nova will always be installed along with quantum for a domU + # devstack install, so it should be safe to rely on nova.conf + # for xenapi configuration. + Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF" + # Under XS/XCP, the ovs agent needs to target the dom0 + # integration bridge. This is enabled by using a root wrapper + # that executes commands on dom0 via a XenAPI plugin. + iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND" + + # FLAT_NETWORK_BRIDGE is the dom0 integration bridge. To + # ensure the bridge lacks direct connectivity, set + # VM_VLAN=-1;VM_DEV=invalid in localrc + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE + + # The ovs agent needs to ensure that the ports associated with + # a given network share the same local vlan tag. On + # single-node XS/XCP, this requires monitoring both the dom0 + # bridge, where VM's are attached, and the domU bridge, where + # dhcp servers are attached. + if is_service_enabled q-dhcp; then + iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE + # DomU will use the regular rootwrap + iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND" + # Plug the vm interface into the domU integration bridge. + sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT + sudo ip link set $OVS_BRIDGE up + # Assign the VM IP only if it has been set explicitly + if [[ "$VM_IP" != "" ]]; then + sudo ip addr add $VM_IP dev $OVS_BRIDGE + fi + sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT + fi + fi +} + +function quantum_plugin_configure_service() { + if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre + iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES + elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan + else + echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." + fi + + # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` + # for more complex physical network configurations. 
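# As a hypothetical illustration, a localrc with two physical networks
# might set (names and ranges here are examples only):
#   OVS_BRIDGE_MAPPINGS=physnet1:br-eth1,physnet2:br-eth2
#   OVS_VLAN_RANGES=physnet1:100:199,physnet2:200:299
# in which case the single-network defaults derived from
# ``PHYSICAL_NETWORK`` and ``TENANT_VLAN_RANGE`` are skipped.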
+ if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then + OVS_VLAN_RANGES=$PHYSICAL_NETWORK + if [[ "$TENANT_VLAN_RANGE" != "" ]]; then + OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE + fi + fi + if [[ "$OVS_VLAN_RANGES" != "" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES + fi + + # Enable tunnel networks if selected + if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True + fi +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base new file mode 100644 index 00000000..8563674f --- /dev/null +++ b/lib/quantum_plugins/ovs_base @@ -0,0 +1,49 @@ +# common functions for ovs based plugin +# ------------------------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +function is_quantum_ovs_base_plugin() { + # Yes, we use OVS. + return 0 +} + +function _quantum_ovs_base_setup_bridge() { + local bridge=$1 + quantum-ovs-cleanup + sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge + sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge +} + +function _quantum_ovs_base_install_agent_packages() { + local kernel_version + # Install deps + # FIXME add to ``files/apts/quantum``, but don't install if not needed! + if is_ubuntu; then + kernel_version=`cat /proc/version | cut -d " " -f3` + install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version + else + ### FIXME(dtroyer): Find RPMs for OpenVSwitch + echo "OpenVSwitch packages need to be located" + # Fedora does not started OVS by default + restart_service openvswitch + fi +} + +function _quantum_ovs_base_configure_debug_command() { + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE +} + +function _quantum_ovs_base_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE + + quantum-ovs-cleanup + sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE + # ensure no IP is configured on the public bridge + sudo ip addr flush dev $PUBLIC_BRIDGE +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu new file mode 100644 index 00000000..2dfd4f70 --- /dev/null +++ b/lib/quantum_plugins/ryu @@ -0,0 +1,66 @@ +# Quantum Ryu plugin +# ------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +source $TOP_DIR/lib/quantum_plugins/ovs_base +source $TOP_DIR/lib/quantum_thirdparty/ryu # for configuration value + +function quantum_plugin_create_nova_conf() { + NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"} + iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE" +} + +function quantum_plugin_install_agent_packages() { + _quantum_ovs_base_install_agent_packages + + # quantum_ryu_agent requires ryu module + install_ryu +} + +function quantum_plugin_configure_common() { + Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu + Q_PLUGIN_CONF_FILENAME=ryu.ini + Q_DB_NAME="ovs_quantum" + Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" +} + +function quantum_plugin_configure_debug_command() { + _quantum_ovs_base_configure_debug_command + iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT 
+} + +function quantum_plugin_configure_dhcp_agent() { + iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT +} + +function quantum_plugin_configure_l3_agent() { + iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT + _quantum_ovs_base_configure_l3_agent +} + +function quantum_plugin_configure_plugin_agent() { + # Set up integration bridge + OVS_BRIDGE=${OVS_BRIDGE:-br-int} + _quantum_ovs_base_setup_bridge $OVS_BRIDGE + if [ -n "$RYU_INTERNAL_INTERFACE" ]; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE + fi + iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE + AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" +} + +function quantum_plugin_configure_service() { + iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT +} + +function quantum_plugin_setup_interface_driver() { + local conf_file=$1 + iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT ovs_use_veth True +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_thirdparty/README.md b/lib/quantum_thirdparty/README.md new file mode 100644 index 00000000..3b5837d7 --- /dev/null +++ b/lib/quantum_thirdparty/README.md @@ -0,0 +1,36 @@ +Quantum third party specific files +================================== +Some Quantum plugins require third party programs to function. +The files under ``lib/quantum_thirdparty/`` are used +when their services are enabled. +Third-party-program-specific configuration variables should be in the +corresponding file. + +* filename: ```` + * The corresponding file name should be the same as the service name, ````. + +functions +--------- +``lib/quantum`` calls the following functions when the ```` is enabled + +Functions to be implemented: +* ``configure_``: + set config files, create data dirs, etc + e.g. + sudo python setup.py deploy + iniset $XXXX_CONF... + +* ``init_``: + initialize databases, etc + +* ``install_``: + collect source and prepare + e.g. + git clone xxx + +* ``start_``: + start running processes, including screen + e.g.
+ screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin" + +* ``stop_``: + stop running processes (non-screen) diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight new file mode 100644 index 00000000..60e39248 --- /dev/null +++ b/lib/quantum_thirdparty/bigswitch_floodlight @@ -0,0 +1,50 @@ +# Big Switch/FloodLight OpenFlow Controller +# ------------------------------------------ + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + +BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} +BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633} +OVS_BRIDGE=${OVS_BRIDGE:-br-int} + +function configure_bigswitch_floodlight() { + : +} + +function init_bigswitch_floodlight() { + install_quantum_agent_packages + + echo -n "Installing OVS managed by the openflow controllers:" + echo ${BS_FL_CONTROLLERS_PORT} + + # Create local OVS bridge and configure it + sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE} + sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE} + sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE} + + ctrls= + for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '` + do + ctrl=${ctrl%:*} + ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}" + done + echo "Adding Network controllers: " ${ctrls} + sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls} +} + +function install_bigswitch_floodlight() { + : +} + +function start_bigswitch_floodlight() { + : +} + +function stop_bigswitch_floodlight() { + : +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu new file mode 100644 index 00000000..7a01923c --- /dev/null +++ b/lib/quantum_thirdparty/ryu @@ -0,0 +1,73 @@ +# Ryu OpenFlow Controller +# ----------------------- + +# Save trace setting +MY_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +RYU_DIR=$DEST/ryu +# Ryu API Host +RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} +# Ryu API Port +RYU_API_PORT=${RYU_API_PORT:-8080} +# Ryu OFP Host +RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} +# Ryu OFP Port +RYU_OFP_PORT=${RYU_OFP_PORT:-6633} +# Ryu Applications +RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} +# Ryu configuration +RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-" +--app_lists=$RYU_APPS +--wsapi_host=$RYU_API_HOST +--wsapi_port=$RYU_API_PORT +--ofp_listen_host=$RYU_OFP_HOST +--ofp_tcp_listen_port=$RYU_OFP_PORT +--quantum_url=http://$Q_HOST:$Q_PORT +--quantum_admin_username=$Q_ADMIN_USERNAME +--quantum_admin_password=$SERVICE_PASSWORD +--quantum_admin_tenant_name=$SERVICE_TENANT_NAME +--quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0 +--quantum_auth_strategy=$Q_AUTH_STRATEGY +--quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT +"} + +function configure_ryu() { + setup_develop $RYU_DIR +} + +function init_ryu() { + RYU_CONF_DIR=/etc/ryu + if [[ !
-d $RYU_CONF_DIR ]]; then + sudo mkdir -p $RYU_CONF_DIR + fi + sudo chown $STACK_USER $RYU_CONF_DIR + RYU_CONF=$RYU_CONF_DIR/ryu.conf + sudo rm -rf $RYU_CONF + + echo "${RYU_CONF_CONTENTS}" > $RYU_CONF +} + +# install_ryu can be called multiple times as quantum_plugins/ryu may call +# this function for quantum-ryu-agent +# Make this function idempotent and avoid cloning the same repo many times +# with RECLONE=yes +_RYU_INSTALLED=${_RYU_INSTALLED:-False} +function install_ryu() { + if [[ "$_RYU_INSTALLED" == "False" ]]; then + git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH + _RYU_INSTALLED=True + fi +} + +function start_ryu() { + screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" +} + +function stop_ryu() { + : +} + +# Restore xtrace +$MY_XTRACE diff --git a/lib/rpc_backend b/lib/rpc_backend new file mode 100644 index 00000000..f35f9dbd --- /dev/null +++ b/lib/rpc_backend @@ -0,0 +1,123 @@ +# lib/rpc_backend +# Interface for interacting with different rpc backends +# rpc backend settings + +# Dependencies: +# ``functions`` file +# ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used + +# ``stack.sh`` calls the entry points in this order: +# +# check_rpc_backend +# install_rpc_backend +# restart_rpc_backend +# iniset_rpc_backend + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + +# Entry Points +# ------------ + +# Make sure we only have one rpc backend enabled. +# Also check the specified rpc backend is available on your platform. +function check_rpc_backend() { + local rpc_backend_cnt=0 + for svc in qpid zeromq rabbit; do + is_service_enabled $svc && + ((rpc_backend_cnt++)) + done + if [ "$rpc_backend_cnt" -gt 1 ]; then + echo "ERROR: only one rpc backend may be enabled," + echo " set only one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." + exit 1 + elif [ "$rpc_backend_cnt" == 0 ]; then + echo "ERROR: at least one rpc backend must be enabled," + echo " set one of 'rabbit', 'qpid', 'zeromq'" + echo " via ENABLED_SERVICES." + exit 1 + fi + + if is_service_enabled qpid && ! qpid_is_supported; then + echo "Qpid support is not available for this version of your distribution."
+ exit 1 + fi +} + +# install rpc backend +function install_rpc_backend() { + if is_service_enabled rabbit; then + # Install rabbitmq-server + # the temp file is necessary due to LP: #878600 + tfile=$(mktemp) + install_package rabbitmq-server > "$tfile" 2>&1 + cat "$tfile" + rm -f "$tfile" + elif is_service_enabled qpid; then + if is_fedora; then + install_package qpid-cpp-server-daemon + elif is_ubuntu; then + install_package qpidd + else + exit_distro_not_supported "qpid installation" + fi + elif is_service_enabled zeromq; then + if is_fedora; then + install_package zeromq python-zmq + elif is_ubuntu; then + install_package libzmq1 python-zmq + elif is_suse; then + install_package libzmq1 python-pyzmq + else + exit_distro_not_supported "zeromq installation" + fi + fi +} + +# restart the rpc backend +function restart_rpc_backend() { + if is_service_enabled rabbit; then + # Start rabbitmq-server + echo_summary "Starting RabbitMQ" + if is_fedora || is_suse; then + # service is not started by default + restart_service rabbitmq-server + fi + # change the rabbit password since the default is "guest" + sudo rabbitmqctl change_password guest $RABBIT_PASSWORD + elif is_service_enabled qpid; then + echo_summary "Starting qpid" + restart_service qpidd + fi +} + +# iniset configuration +function iniset_rpc_backend() { + local package=$1 + local file=$2 + local section=$3 + if is_service_enabled zeromq; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq + elif is_service_enabled qpid; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid + elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu + iniset $file $section rabbit_host $RABBIT_HOST + iniset $file $section rabbit_password $RABBIT_PASSWORD + fi +} + +# Check if qpid can be used on the current distro. +# qpid_is_supported +function qpid_is_supported() { + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is + # not in openSUSE either right now. + ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) ) +} + +# Restore xtrace +$XTRACE diff --git a/lib/swift b/lib/swift new file mode 100644 index 00000000..5ba7e56f --- /dev/null +++ b/lib/swift @@ -0,0 +1,378 @@ +# lib/swift +# Functions to control the configuration and operation of the **Swift** service + +# Dependencies: +# ``functions`` file +# ``DEST``, ``SCREEN_NAME``, ``SWIFT_HASH`` must be defined +# ``STACK_USER`` must be defined +# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined +# ``lib/keystone`` file +# ``stack.sh`` calls the entry points in this order: +# +# install_swift +# configure_swift +# init_swift +# start_swift +# stop_swift +# cleanup_swift + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# Set up default directories +SWIFT_DIR=$DEST/swift +SWIFTCLIENT_DIR=$DEST/python-swiftclient +SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} + +# TODO: add logging to different location. + +# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. +# Default is the common DevStack data directory. +SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} + +# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. +# Default is ``/etc/swift``.
+SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
+
+# DevStack will create a loop-back disk formatted as XFS to store the
+# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
+# kilobytes.
+# Default is 1 gigabyte.
+SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
+
+# The ring uses a configurable number of bits from a path’s MD5 hash as
+# a partition index that designates a device. The number of bits kept
+# from the hash is known as the partition power, and 2 to the partition
+# power indicates the partition count. Partitioning the full MD5 hash
+# ring allows other parts of the cluster to work in batches of items at
+# once, which ends up either more efficient or at least less complex than
+# working with each item separately or the entire cluster all at once.
+# By default we use a partition power of 9 (which means 2^9 = 512 partitions).
+SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
+
+# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
+# configured for your Swift cluster. The default of three replicas needs a
+# fair amount of I/O and memory on a VM; you may want to lower that to 1
+# if you only want to do some quick testing.
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
+SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
+
+# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``:
+# port bases used in the port number calculation for the service "nodes".
+# The specified port number is used for the first node; the additional
+# ports are calculated as base_port + (node_num - 1) * 10
+OBJECT_PORT_BASE=6010
+CONTAINER_PORT_BASE=6011
+ACCOUNT_PORT_BASE=6012
+
+
+# Entry Points
+# ------------
+
+# cleanup_swift() - Remove residual data files
+function cleanup_swift() {
+    rm -f ${SWIFT_CONFIG_DIR}/{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
+    if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+        sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
+    fi
+    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+        rm ${SWIFT_DATA_DIR}/drives/images/swift.img
+    fi
+}
+
+# configure_swift() - Set config files, create data dirs and loop image
+function configure_swift() {
+    local swift_auth_server
+    local node_number
+    local swift_node_config
+    local swift_log_dir
+
+    setup_develop $SWIFT_DIR
+
+    # Make sure to kill all swift processes first
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+
+    # First do a bit of setup by creating the directories and
+    # changing the permissions so we can run it as our user.
+
+    USER_GROUP=$(id -g)
+    sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
+
+    # Create a loopback disk and format it to XFS.
+    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
+            sudo rm -f ${SWIFT_DATA_DIR}/drives/images/swift.img
+        fi
+    fi
+
+    mkdir -p ${SWIFT_DATA_DIR}/drives/images
+    sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img
+    sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+    dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
+        bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
+
+    # Make a fresh XFS filesystem
+    mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+    # Mount the disk with mount options to make it as efficient as possible
+    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
+    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
+    fi
+
+    # Create a link to the above mount and
+    # create all of the directories needed to emulate a few different servers
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
+        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
+        node=${SWIFT_DATA_DIR}/${node_number}/node
+        node_device=${node}/sdb1
+        [[ -d $node ]] && continue
+        [[ -d $drive ]] && continue
+        sudo install -o ${USER} -g $USER_GROUP -d $drive
+        sudo install -o ${USER} -g $USER_GROUP -d $node_device
+        sudo chown -R $USER: ${node}
+    done
+
+    sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server
+    sudo chown -R $USER: ${SWIFT_CONFIG_DIR}
+
+    if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
+        # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
+        # Create a symlink if the config dir is moved
+        sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
+    fi
+
+    # Swift uses rsync to synchronize data between all the different
+    # partitions (which makes more sense when you have a multi-node
+    # setup), so we configure it with our version of rsyncd.conf.
+    sed -e "
+        s/%GROUP%/${USER_GROUP}/;
+        s/%USER%/$USER/;
+        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
+    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
+    # rsyncd.conf is only prepared for 4 nodes
+    if is_ubuntu; then
+        sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
+    else
+        sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
+    fi
+
+    if is_service_enabled swift3; then
+        swift_auth_server="s3token "
+    fi
+
+    # By default Swift is installed with the tempauth middleware, which has
+    # built-in default usernames and passwords; if keystone is enabled,
+    # the keystone auth middlewares are used instead.
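+    # As an illustration, with both keystone and swift3 enabled the proxy
+    # pipeline assembled below works out to roughly:
+    #   catch_errors healthcheck cache ratelimit swift3 s3token authtoken keystoneauth proxy-logging proxy-server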
+    if is_service_enabled key; then
+        swift_auth_server+="authtoken keystoneauth"
+    else
+        swift_auth_server=tempauth
+    fi
+
+    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf
+    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+
+    # Only enable Swift3 if we have it enabled in ENABLED_SERVICES
+    is_service_enabled swift3 && swift3=swift3 || swift3=""
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server"
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
+
+    # Configure Keystone
+    sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
+
+    if is_service_enabled swift3; then
+        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
+# NOTE(chmou): s3token middleware is not updated yet to use only
+# username and password.
+[filter:s3token]
+paste.filter_factory = keystone.middleware.s3_token:filter_factory
+auth_port = ${KEYSTONE_AUTH_PORT}
+auth_host = ${KEYSTONE_AUTH_HOST}
+auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+auth_token = ${SERVICE_TOKEN}
+admin_token = ${SERVICE_TOKEN}
+
+[filter:swift3]
+use = egg:swift3#swift3
+EOF
+    fi
+
+    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
+    iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
+
+    # This function generates an object/container/account server configuration
+    # emulating ``SWIFT_REPLICAS`` nodes on different ports
+    function generate_swift_config() {
+        local swift_node_config=$1
+        local node_id=$2
+        local bind_port=$3
+
+        log_facility=$[ node_id - 1 ]
+        node_path=${SWIFT_DATA_DIR}/${node_number}
+
+        iniuncomment ${swift_node_config} DEFAULT user
+        iniset ${swift_node_config} DEFAULT user ${USER}
+
+        iniuncomment ${swift_node_config} DEFAULT bind_port
+        iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
+
+        iniuncomment ${swift_node_config} DEFAULT swift_dir
+        iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+        iniuncomment ${swift_node_config} DEFAULT devices
+        iniset ${swift_node_config} DEFAULT devices ${node_path}
+
+        iniuncomment ${swift_node_config} DEFAULT log_facility
+        iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+
+        iniuncomment ${swift_node_config} DEFAULT mount_check
+        iniset ${swift_node_config} DEFAULT mount_check false
+
+        iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
+        iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
+    }
+
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf
+        cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
+        generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)]
+        iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache
+        # Using a sed and not iniset/iniuncomment because we want a global
+        # modification and to make sure it works for new sections.
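+        # e.g. a sample line such as ``# recon_cache_path = /var/cache/swift``
+        # (a hypothetical default) becomes
+        # ``recon_cache_path = ${SWIFT_DATA_DIR}/cache``.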
+ sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} + + swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)] + iniuncomment ${swift_node_config} app:container-server allow_versions + iniset ${swift_node_config} app:container-server allow_versions "true" + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} + + swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf + cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} + generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)] + sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config} + done + + swift_log_dir=${SWIFT_DATA_DIR}/logs + rm -rf ${swift_log_dir} + mkdir -p ${swift_log_dir}/hourly + sudo chown -R $USER:adm ${swift_log_dir} + sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ + tee /etc/rsyslog.d/10-swift.conf +} + +# configure_swiftclient() - Set config files, create data dirs, etc +function configure_swiftclient() { + setup_develop $SWIFTCLIENT_DIR +} + +# init_swift() - Initialize rings +function init_swift() { + local node_number + # Make sure to kill all swift processes first + swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true + + # This is where we create three different rings for swift with + # different object servers binding on different ports. + pushd ${SWIFT_CONFIG_DIR} >/dev/null && { + + rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz + + swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 + + for node_number in ${SWIFT_REPLICAS_SEQ}; do + swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1 + done + swift-ring-builder object.builder rebalance + swift-ring-builder container.builder rebalance + swift-ring-builder account.builder rebalance + } && popd >/dev/null + + # Create cache dir + sudo mkdir -p $SWIFT_AUTH_CACHE_DIR + sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR + rm -f $SWIFT_AUTH_CACHE_DIR/* +} + +function install_swift() { + git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH +} + +function install_swiftclient() { + git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH +} + + +# start_swift() - Start running processes, including screen +function start_swift() { + # (re)start rsyslog + restart_service rsyslog + # Start rsync + if is_ubuntu; then + sudo /etc/init.d/rsync restart || : + else + sudo systemctl start xinetd.service + fi + + # First spawn all the swift services then kill the + # proxy service so we can run it in foreground in screen. + # ``swift-init ... 
{stop|restart}`` exits with '1' if no servers are running,
+    # ignore it just in case
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
+    screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
+}
+
+# stop_swift() - Stop running processes (non-screen)
+function stop_swift() {
+    # screen normally killed by unstack.sh
+    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/tempest b/lib/tempest
index 115c9118..364323de 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -1,18 +1,33 @@
 # lib/tempest
+# Install and configure Tempest
 
 # Dependencies:
 # ``functions`` file
-# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# ``lib/nova`` service is running
 #
-
+# - ``DEST``, ``FILES``
+# - ``ADMIN_PASSWORD``
+# - ``DEFAULT_IMAGE_NAME``
+# - ``S3_SERVICE_PORT``
+# - ``SERVICE_HOST``
+# - ``BASE_SQL_CONN`` (declared in ``lib/database``)
+# - ``PUBLIC_NETWORK_NAME``
+# - ``Q_USE_NAMESPACE``
+# - ``Q_ROUTER_NAME``
+# - ``VIRT_DRIVER``
+# - ``LIBVIRT_TYPE``
+# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
+# Optional Dependencies:
+# ALT_* (similar vars exist in keystone_data.sh)
+# ``LIVE_MIGRATION_AVAILABLE``
+# ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
+# ``DEFAULT_INSTANCE_TYPE``
+# ``DEFAULT_INSTANCE_USER``
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_XXXX
-# configure_XXXX
-# init_XXXX
-# start_XXXX
-# stop_XXXX
-# cleanup_XXXX
+# install_tempest
+# configure_tempest
+# init_tempest
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -22,35 +37,261 @@ set +o xtrace
 # Defaults
 # --------
 
-#
 # Set up default directories
 TEMPEST_DIR=$DEST/tempest
-TEMPEST_CONF_DIR=$DEST/tempest/etc
+TEMPEST_CONF_DIR=$TEMPEST_DIR/etc
+TEMPEST_CONF=$TEMPEST_CONF_DIR/tempest.conf
+
+NOVA_SOURCE_DIR=$DEST/nova
+
+BUILD_INTERVAL=3
+BUILD_TIMEOUT=400
+
+
+BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.0"
 
 # Entry Points
 # ------------
 
-
 # configure_tempest() - Set config files, create data dirs, etc
 function configure_tempest() {
+    setup_develop $TEMPEST_DIR
+    local image_lines
+    local images
+    local num_images
+    local image_uuid
+    local image_uuid_alt
+    local errexit
+    local password
+    local line
+    local flavors
+    local flavors_ref
+    local flavor_lines
+    local public_network_id
+    local public_router_id
+    local tenant_networks_reachable
+    local boto_instance_type="m1.tiny"
+
+    # TODO(afazekas):
     # sudo python setup.py deploy
-    # iniset $tempest_CONF ...
-    # This function intentionally left blank
-    #
-    # TODO(sdague) actually move the guts of configure tempest
-    # into this function
-    cd tools
-    ./configure_tempest.sh
-    cd ..
-}
 
+    # This function exits on an error so that errors don't compound and you see
+    # only the first error that occurred.
+    errexit=$(set +o | grep errexit)
+    set -o errexit
+
+    # Save IFS
+    ifs=$IFS
+
+    # Glance should already contain images to be used in tempest
+    # testing. Here we simply look for images stored in Glance
+    # and set the appropriate variables for use in the tempest config.
+    # We ignore ramdisk and kernel images and look for the default image
+    # ``DEFAULT_IMAGE_NAME``. If not found, we set the ``image_uuid`` to the
+    # first image returned and set ``image_uuid_alt`` to the second,
+    # if there is more than one returned...
+    # Also ensure we only take active images, so we don't get snapshots in process
+    declare -a images
+
+    while read -r IMAGE_NAME IMAGE_UUID; do
+        if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+            image_uuid="$IMAGE_UUID"
+            image_uuid_alt="$IMAGE_UUID"
+        fi
+        images+=($IMAGE_UUID)
+    done < <(glance image-list --status=active | awk -F'|' '!/^(\+--)|ID|aki|ari/ { print $3,$2 }')
+
+    case "${#images[*]}" in
+        0)
+            echo "Found no valid images to use!"
+            exit 1
+            ;;
+        1)
+            if [ -z "$image_uuid" ]; then
+                image_uuid=${images[0]}
+                image_uuid_alt=${images[0]}
+            fi
+            ;;
+        *)
+            if [ -z "$image_uuid" ]; then
+                image_uuid=${images[0]}
+                image_uuid_alt=${images[1]}
+            fi
+            ;;
+    esac
+
+    # Create tempest.conf from tempest.conf.sample
+    # copy every time, because the image UUIDs are going to change
+    cp $TEMPEST_CONF.sample $TEMPEST_CONF
+
+    password=${ADMIN_PASSWORD:-secrete}
+
+    # See files/keystone_data.sh where the alt_demo user
+    # and tenant are set up...
+    ALT_USERNAME=${ALT_USERNAME:-alt_demo}
+    ALT_TENANT_NAME=${ALT_TENANT_NAME:-alt_demo}
+
+    # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior:
+    # Tempest creates instance types for itself
+    if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
+        nova flavor-create m1.nano 42 64 0 1
+        flavor_ref=42
+        boto_instance_type=m1.nano
+        nova flavor-create m1.micro 84 128 0 1
+        flavor_ref_alt=84
+    else
+        # Check Nova for existing flavors and, if set, look for the
+        # ``DEFAULT_INSTANCE_TYPE`` and use that.
+        boto_instance_type=$DEFAULT_INSTANCE_TYPE
+        flavor_lines=`nova flavor-list`
+        IFS=$'\r\n'
+        flavors=""
+        for line in $flavor_lines; do
+            f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }")
+            flavors="$flavors $f"
+        done
+
+        for line in $flavor_lines; do
+            flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`"
+        done
+
+        IFS=" "
+        flavors=($flavors)
+        num_flavors=${#flavors[*]}
+        echo "Found $num_flavors flavors"
+        if [[ $num_flavors -eq 0 ]]; then
+            echo "Found no valid flavors to use!"
+            exit 1
+        fi
+        flavor_ref=${flavors[0]}
+        flavor_ref_alt=$flavor_ref
+        if [[ $num_flavors -gt 1 ]]; then
+            flavor_ref_alt=${flavors[1]}
+        fi
+    fi
+
+    if [ "$Q_USE_NAMESPACE" != "False" ]; then
+        tenant_networks_reachable=false
+    else
+        tenant_networks_reachable=true
+    fi
+
+    if is_service_enabled q-l3; then
+        public_network_id=$(quantum net-list | grep $PUBLIC_NETWORK_NAME | \
+            awk '{print $2}')
+        if [ "$Q_USE_NAMESPACE" == "False" ]; then
+            # If namespaces are disabled, devstack will create a single
+            # public router that tempest should be configured to use.
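+            # (Illustration: ``quantum router-list`` prints a table whose
+            # second column is the router id; the awk expression below picks
+            # the row matching ``Q_ROUTER_NAME``.)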
+            public_router_id=$(quantum router-list | awk "/ $Q_ROUTER_NAME / \
+                { print \$2 }")
+        fi
+    fi
+
+    # Timeouts
+    iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF volume build_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF boto build_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF compute build_interval $BUILD_INTERVAL
+    iniset $TEMPEST_CONF volume build_interval $BUILD_INTERVAL
+    iniset $TEMPEST_CONF boto build_interval $BUILD_INTERVAL
+    iniset $TEMPEST_CONF boto http_socket_timeout 5
+
+    # Identity
+    iniset $TEMPEST_CONF identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
+    iniset $TEMPEST_CONF identity password "$password"
+    iniset $TEMPEST_CONF identity alt_username $ALT_USERNAME
+    iniset $TEMPEST_CONF identity alt_password "$password"
+    iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME
+    iniset $TEMPEST_CONF identity admin_password "$password"
+
+    # Compute
+    iniset $TEMPEST_CONF compute change_password_available False
+    # Note(nati): the current tempest doesn't create a network for each tenant,
+    # so reuse the same tenant for now
+    if is_service_enabled quantum; then
+        TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False}
+    fi
+    iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
+    iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
+    iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME
+    iniset $TEMPEST_CONF compute ip_version_for_ssh 4
+    iniset $TEMPEST_CONF compute ssh_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONF compute image_ref $image_uuid
+    iniset $TEMPEST_CONF compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+    iniset $TEMPEST_CONF compute image_ref_alt $image_uuid_alt
+    iniset $TEMPEST_CONF compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+    iniset $TEMPEST_CONF compute flavor_ref $flavor_ref
+    iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt
+    iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False}
+    iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+
+    # Whitebox
+    iniset $TEMPEST_CONF whitebox source_dir $NOVA_SOURCE_DIR
+    iniset $TEMPEST_CONF whitebox bin_dir $NOVA_BIN_DIR
+    # TODO(jaypipes): Create the key file here... right now, no whitebox
+    # tests actually use a key.
+    iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa
+    iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova
+
+
+    # compute admin
+    iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED
+
+    # network
+    iniset $TEMPEST_CONF network api_version 2.0
+    iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable"
+    iniset $TEMPEST_CONF network public_network_id "$public_network_id"
+    iniset $TEMPEST_CONF network public_router_id "$public_router_id"
+
+    # boto
+    iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
+    iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
+    iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH"
+    iniset $TEMPEST_CONF boto instance_type "$boto_instance_type"
+    iniset $TEMPEST_CONF boto http_socket_timeout 30
+    iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+
+    echo "Created tempest configuration file:"
+    cat $TEMPEST_CONF
+
+    # Restore IFS
+    IFS=$ifs
+    # Restore errexit
+    $errexit
+}
 
 # install_tempest() - Collect source and prepare
 function install_tempest() {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
+
+    # Tempest doesn't satisfy its dependencies on its own, so
+    # install them here instead.
+    pip_install -r $TEMPEST_DIR/tools/pip-requires
 }
 
+# init_tempest() - Initialize ec2 images
+function init_tempest() {
+    local base_image_name=cirros-0.3.0-x86_64
+    # /opt/stack/devstack/files/images/cirros-0.3.0-x86_64-uec
+    local image_dir="$FILES/images/${base_image_name}-uec"
+    local kernel="$image_dir/${base_image_name}-vmlinuz"
+    local ramdisk="$image_dir/${base_image_name}-initrd"
+    local disk_image="$image_dir/${base_image_name}-blank.img"
+    # if the cirros uec images were downloaded and the system is uec capable
+    if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \
+        -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
+        echo "Prepare aki/ari/ami Images"
+        ( #new namespace
+        # tenant:demo ; user: demo
+        source $TOP_DIR/accrc/demo/demo
+        euca-bundle-image -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
+        euca-bundle-image -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
+        euca-bundle-image -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
+        ) 2>&1 $ca_dir/serial
+    cp /dev/null $ca_dir/index.txt
+}
+
+
+# Create a new CA configuration file
+# create_CA_config ca-dir common-name
+function create_CA_config() {
+    local ca_dir=$1
+    local common_name=$2
+
+    echo "
+[ ca ]
+default_ca = CA_default
+
+[ CA_default ]
+dir = $ca_dir
+policy = policy_match
+database = \$dir/index.txt
+serial = \$dir/serial
+certs = \$dir/certs
+crl_dir = \$dir/crl
+new_certs_dir = \$dir/newcerts
+certificate = \$dir/cacert.pem
+private_key = \$dir/private/cacert.key
+RANDFILE = \$dir/private/.rand
+default_md = default
+
+[ req ]
+default_bits = 1024
+default_md = sha1
+
+prompt = no
+distinguished_name = ca_distinguished_name
+
+x509_extensions = ca_extensions
+
+[ ca_distinguished_name ]
+organizationName = $ORG_NAME
+organizationalUnitName = $ORG_UNIT_NAME Certificate Authority
+commonName = $common_name
+
+[ policy_match ]
+countryName = optional
+stateOrProvinceName = optional
+organizationName = match
+organizationalUnitName = optional
+commonName = supplied
+
+[ ca_extensions ]
+basicConstraints = critical,CA:true
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid:always, issuer
+keyUsage = cRLSign, keyCertSign
+
+" >$ca_dir/ca.conf
+}
+
+# Create a new signing configuration file
+# create_signing_config ca-dir +function create_signing_config() { + local ca_dir=$1 + + echo " +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = $ca_dir +policy = policy_match +database = \$dir/index.txt +serial = \$dir/serial +certs = \$dir/certs +crl_dir = \$dir/crl +new_certs_dir = \$dir/newcerts +certificate = \$dir/cacert.pem +private_key = \$dir/private/cacert.key +RANDFILE = \$dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = $ORG_NAME +organizationalUnitName = $ORG_UNIT_NAME Server Farm + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +subjectAltName = \$ENV::SUBJECT_ALT_NAME + +" >$ca_dir/signing.conf +} + +# Create root and intermediate CAs +# init_CA +function init_CA { + # Ensure CAs are built + make_root_CA $ROOT_CA_DIR + make_int_CA $INT_CA_DIR $ROOT_CA_DIR + + # Create the CA bundle + cat $ROOT_CA_DIR/cacert.pem $INT_CA_DIR/cacert.pem >>$INT_CA_DIR/ca-chain.pem +} + +# Create an initial server cert +# init_cert +function init_cert { + if [[ ! -r $DEVSTACK_CERT ]]; then + if [[ -n "$TLS_IP" ]]; then + # Lie to let incomplete match routines work + TLS_IP="DNS:$TLS_IP" + fi + make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP" + + # Create a cert bundle + cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT + fi +} + + +# make_cert creates and signs a new certificate with the given commonName and CA +# make_cert ca-dir cert-name "common-name" ["alt-name" ...] 
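+# e.g. (hypothetical values, mirroring the init_cert call above):
+#   make_cert $INT_CA_DIR devstack-cert devstack.example.org "DNS:10.0.0.10"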
+function make_cert() {
+    local ca_dir=$1
+    local cert_name=$2
+    local common_name=$3
+    local alt_names=$4
+
+    # Generate a signing request
+    $OPENSSL req \
+        -sha1 \
+        -newkey rsa \
+        -nodes \
+        -keyout $ca_dir/private/$cert_name.key \
+        -out $ca_dir/$cert_name.csr \
+        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}"
+
+    if [[ -z "$alt_names" ]]; then
+        alt_names="DNS:${common_name}"
+    else
+        alt_names="DNS:${common_name},${alt_names}"
+    fi
+
+    # Sign the request valid for 1 year
+    SUBJECT_ALT_NAME="$alt_names" \
+    $OPENSSL ca -config $ca_dir/signing.conf \
+        -extensions req_extensions \
+        -days 365 \
+        -notext \
+        -in $ca_dir/$cert_name.csr \
+        -out $ca_dir/$cert_name.crt \
+        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \
+        -batch
+}
+
+
+# Make an intermediate CA to sign everything else
+# make_int_CA ca-dir signing-ca-dir
+function make_int_CA() {
+    local ca_dir=$1
+    local signing_ca_dir=$2
+
+    # Create the intermediate CA
+    create_CA_base $ca_dir
+    create_CA_config $ca_dir 'Intermediate CA'
+    create_signing_config $ca_dir
+
+    # Create a signing certificate request
+    $OPENSSL req -config $ca_dir/ca.conf \
+        -sha1 \
+        -newkey rsa \
+        -nodes \
+        -keyout $ca_dir/private/cacert.key \
+        -out $ca_dir/cacert.csr \
+        -outform PEM
+
+    # Sign the intermediate request valid for 1 year
+    $OPENSSL ca -config $signing_ca_dir/ca.conf \
+        -extensions ca_extensions \
+        -days 365 \
+        -notext \
+        -in $ca_dir/cacert.csr \
+        -out $ca_dir/cacert.pem \
+        -batch
+}
+
+# Make a root CA to sign other CAs
+# make_root_CA ca-dir
+function make_root_CA() {
+    local ca_dir=$1
+
+    # Create the root CA
+    create_CA_base $ca_dir
+    create_CA_config $ca_dir 'Root CA'
+
+    # Create a self-signed certificate valid for 21360 days (roughly 58 years)
+    $OPENSSL req -config $ca_dir/ca.conf \
+        -x509 \
+        -nodes \
+        -newkey rsa \
+        -days 21360 \
+        -keyout $ca_dir/private/cacert.key \
+        -out $ca_dir/cacert.pem \
+        -outform PEM
+}
+
+
+# Proxy Functions
+# ===============
+
+# Starts the TLS proxy for the given IP/ports
+# start_tls_proxy front-host front-port back-host back-port
+function start_tls_proxy() {
+    local f_host=$1
+    local f_port=$2
+    local b_host=$3
+    local b_port=$4
+
+    stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
+}
diff --git a/openrc b/openrc
index 0a6a2150..3ef44fd1 100644
--- a/openrc
+++ b/openrc
@@ -26,6 +26,14 @@ source $RC_DIR/functions
 # Load local configuration
 source $RC_DIR/stackrc
 
+# Load the last env variables if available
+if [[ -r $TOP_DIR/.stackenv ]]; then
+    source $TOP_DIR/.stackenv
+fi
+
+# Get some necessary configuration
+source $RC_DIR/lib/tls
+
 # The introduction of Keystone to the OpenStack ecosystem has standardized the
 # term **tenant** as the entity that owns resources. In some places references
 # still exist to the original Nova term **project** for this use. Also,
@@ -49,6 +57,7 @@ export OS_NO_CACHE=${OS_NO_CACHE:-1}
 # which is convenient for some localrc configurations.
 HOST_IP=${HOST_IP:-127.0.0.1}
 SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
 
 # Some exercises call glance directly. On a single-node installation, Glance
 # should be listening on HOST_IP. If its running elsewhere, it can be set here
@@ -61,7 +70,10 @@ GLANCE_HOST=${GLANCE_HOST:-$HOST_IP}
 #
 # *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0.
We # will use the 1.1 *compute api* -export OS_AUTH_URL=http://$SERVICE_HOST:5000/v2.0 +export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0 + +# Set the pointer to our CA certificate chain. Harmless if TLS is not used. +export OS_CACERT=$INT_CA_DIR/ca-chain.pem # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. @@ -72,6 +84,3 @@ export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} # set log level to DEBUG (helps debug issues) # export KEYSTONECLIENT_DEBUG=1 # export NOVACLIENT_DEBUG=1 - -# set qunatum debug command -export TEST_CONFIG_FILE=/etc/quantum/debug.ini diff --git a/stack.sh b/stack.sh index 40eab36e..331743f0 100755 --- a/stack.sh +++ b/stack.sh @@ -29,12 +29,9 @@ source $TOP_DIR/functions # and ``DISTRO`` GetDistro -# Import database library (must be loaded before stackrc which sources localrc) -source $TOP_DIR/lib/database - -# Settings -# ======== +# Global Settings +# =============== # ``stack.sh`` is customizable through setting environment variables. If you # want to override a setting you can set and export it:: @@ -64,42 +61,41 @@ fi source $TOP_DIR/stackrc -# Proxy Settings +# Local Settings # -------------- -# HTTP and HTTPS proxy servers are supported via the usual environment variables [1] -# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in -# ``localrc`` if necessary or on the command line:: -# -# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html -# -# http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh - -if [[ -n "$http_proxy" ]]; then - export http_proxy=$http_proxy -fi -if [[ -n "$https_proxy" ]]; then - export https_proxy=$https_proxy -fi -if [[ -n "$no_proxy" ]]; then - export no_proxy=$no_proxy -fi +# Make sure the proxy config is visible to sub-processes +export_proxy_variables # Destination path for installation ``DEST`` DEST=${DEST:-/opt/stack} # Sanity Check -# ============ +# ------------ + +# Clean up last environment var cache +if [[ -r $TOP_DIR/.stackenv ]]; then + rm $TOP_DIR/.stackenv +fi + +# Import common services (database, message queue) configuration +source $TOP_DIR/lib/database +source $TOP_DIR/lib/rpc_backend + +# Validate database selection +# Since DATABASE_BACKENDS is now set, this also gets ENABLED_SERVICES +# properly configured for the database selection. +use_database $DATABASE_TYPE || echo "Invalid database '$DATABASE_TYPE'" # Remove services which were negated in ENABLED_SERVICES -# using the "-" prefix (e.g., "-n-vol") instead of +# using the "-" prefix (e.g., "-rabbit") instead of # calling disable_service(). disable_negated_services # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then +if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then echo "If you wish to run this script anyway run with FORCE=yes" @@ -107,11 +103,9 @@ if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then fi fi -# Qpid was introduced to Ubuntu in precise, disallow it on oneiric -if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then - echo "You must use Ubuntu Precise or newer for Qpid support." 
- exit 1 -fi +# Make sure we only have one rpc backend enabled, +# and the specified rpc backend is available on your platform. +check_rpc_backend # ``stack.sh`` keeps function libraries here # Make sure ``$TOP_DIR/lib`` directory is present @@ -137,29 +131,6 @@ if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then exit 1 fi -# Make sure we only have one rpc backend enabled. -rpc_backend_cnt=0 -for svc in qpid zeromq rabbit; do - is_service_enabled $svc && - ((rpc_backend_cnt++)) -done -if [ "$rpc_backend_cnt" -gt 1 ]; then - echo "ERROR: only one rpc backend may be enabled," - echo " set only one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." -elif [ "$rpc_backend_cnt" == 0 ]; then - echo "ERROR: at least one rpc backend must be enabled," - echo " set one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." -fi -unset rpc_backend_cnt - -# Make sure we only have one volume service enabled. -if is_service_enabled cinder && is_service_enabled n-vol; then - echo "ERROR: n-vol and cinder must not be enabled at the same time" - exit 1 -fi - # Set up logging level VERBOSE=$(trueorfalse True $VERBOSE) @@ -175,35 +146,36 @@ VERBOSE=$(trueorfalse True $VERBOSE) if [[ $EUID -eq 0 ]]; then ROOTSLEEP=${ROOTSLEEP:-10} echo "You are running this script as root." - echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user" + echo "In $ROOTSLEEP seconds, we will create a user '$STACK_USER' and run as that user" sleep $ROOTSLEEP # Give the non-root user the ability to run as **root** via ``sudo`` is_package_installed sudo || install_package sudo - if ! getent group stack >/dev/null; then - echo "Creating a group called stack" - groupadd stack + if ! getent group $STACK_USER >/dev/null; then + echo "Creating a group called $STACK_USER" + groupadd $STACK_USER fi - if ! getent passwd stack >/dev/null; then - echo "Creating a user called stack" - useradd -g stack -s /bin/bash -d $DEST -m stack + if ! 
getent passwd $STACK_USER >/dev/null; then + echo "Creating a user called $STACK_USER" + useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER fi echo "Giving stack user passwordless sudo privileges" # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" >> /etc/sudoers - ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \ + ( umask 226 && echo "$STACK_USER ALL=(ALL) NOPASSWD:ALL" \ > /etc/sudoers.d/50_stack_sh ) - echo "Copying files to stack user" - STACK_DIR="$DEST/${PWD##*/}" - cp -r -f -T "$PWD" "$STACK_DIR" - chown -R stack "$STACK_DIR" + echo "Copying files to $STACK_USER user" + STACK_DIR="$DEST/${TOP_DIR##*/}" + cp -r -f -T "$TOP_DIR" "$STACK_DIR" + chown -R $STACK_USER "$STACK_DIR" + cd "$STACK_DIR" if [[ "$SHELL_AFTER_RUN" != "no" ]]; then - exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack + exec sudo -u $STACK_USER bash -l -c "set -e; bash stack.sh; bash" else - exec su -c "set -e; cd $STACK_DIR; bash stack.sh" stack + exec sudo -u $STACK_USER bash -l -c "set -e; source stack.sh" fi exit 1 else @@ -216,10 +188,10 @@ else # Set up devstack sudoers TEMPFILE=`mktemp` - echo "`whoami` ALL=(root) NOPASSWD:ALL" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will # see them by forcing PATH - echo "Defaults:`whoami` secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE + echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh @@ -231,7 +203,7 @@ fi # Create the destination directory and ensure it is writable by the user sudo mkdir -p $DEST if [ ! -w $DEST ]; then - sudo chown `whoami` $DEST + sudo chown $STACK_USER $DEST fi # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without @@ -247,7 +219,7 @@ ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE` # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR -sudo chown `whoami` $DATA_DIR +sudo chown $STACK_USER $DATA_DIR # Common Configuration @@ -263,7 +235,7 @@ FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} # Find the interface used for the default route -HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }')} +HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then HOST_IP="" @@ -289,6 +261,7 @@ fi # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. 
SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # Configure services to use syslog instead of writing to individual log files SYSLOG=`trueorfalse False $SYSLOG` @@ -306,54 +279,25 @@ SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} # ================== # Get project function libraries +source $TOP_DIR/lib/tls +source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/n-vol +source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/quantum -source $TOP_DIR/lib/tempest +source $TOP_DIR/lib/baremetal +source $TOP_DIR/lib/ldap # Set the destination directories for OpenStack projects HORIZON_DIR=$DEST/horizon OPENSTACKCLIENT_DIR=$DEST/python-openstackclient NOVNC_DIR=$DEST/noVNC -SWIFT_DIR=$DEST/swift +SPICE_DIR=$DEST/spice-html5 SWIFT3_DIR=$DEST/swift3 -SWIFTCLIENT_DIR=$DEST/python-swiftclient -QUANTUM_DIR=$DEST/quantum -QUANTUM_CLIENT_DIR=$DEST/python-quantumclient - -# Default Quantum Plugin -Q_PLUGIN=${Q_PLUGIN:-openvswitch} -# Default Quantum Port -Q_PORT=${Q_PORT:-9696} -# Default Quantum Host -Q_HOST=${Q_HOST:-localhost} -# Which Quantum API nova should use -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# Use namespace or not -Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP} - -RYU_DIR=$DEST/ryu -# Ryu API Host -RYU_API_HOST=${RYU_API_HOST:-127.0.0.1} -# Ryu API Port -RYU_API_PORT=${RYU_API_PORT:-8080} -# Ryu OFP Host -RYU_OFP_HOST=${RYU_OFP_HOST:-127.0.0.1} -# Ryu OFP Port -RYU_OFP_PORT=${RYU_OFP_PORT:-6633} -# Ryu Applications -RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest} # Should cinder perform secure deletion of volumes? # Defaults to true, can be set to False to avoid this bug when testing: @@ -419,6 +363,13 @@ if [ "$VIRT_DRIVER" = 'xenserver' ]; then # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u) GUEST_INTERFACE_DEFAULT=eth1 +elif [ "$VIRT_DRIVER" = 'baremetal' ]; then + PUBLIC_INTERFACE_DEFAULT=eth0 + FLAT_NETWORK_BRIDGE_DEFAULT=br100 + FLAT_INTERFACE=${FLAT_INTERFACE:-eth0} + FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-False} + NET_MAN=${NET_MAN:-FlatManager} + STUB_NETWORK=${STUB_NETWORK:-False} else PUBLIC_INTERFACE_DEFAULT=br100 FLAT_NETWORK_BRIDGE_DEFAULT=br100 @@ -430,6 +381,7 @@ NET_MAN=${NET_MAN:-FlatDHCPManager} EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST} FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT} VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT} +FORCE_DHCP_RELEASE=${FORCE_DHCP_RELEASE:-True} # Test floating pool and range are used for testing. They are defined # here until the admin APIs can replace nova-manage @@ -459,26 +411,6 @@ FLAT_INTERFACE=${FLAT_INTERFACE-$GUEST_INTERFACE_DEFAULT} ## FIXME(ja): should/can we check that FLAT_INTERFACE is sane? -# Quantum Networking -# ------------------ - -# Make sure that quantum is enabled in ENABLED_SERVICES. If you want -# to run Quantum on this host, make sure that q-svc is also in -# ENABLED_SERVICES. -# -# If you're planning to use the Quantum openvswitch plugin, set -# Q_PLUGIN to "openvswitch" and make sure the q-agt service is enabled -# in ENABLED_SERVICES. 
If you're planning to use the Quantum -# linuxbridge plugin, set Q_PLUGIN to "linuxbridge" and make sure the -# q-agt service is enabled in ENABLED_SERVICES. -# -# See "Quantum Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Quantum. -# -# With Quantum networking the NET_MAN variable is ignored. - - # Database Configuration # ---------------------- @@ -502,41 +434,6 @@ if is_service_enabled rabbit; then read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT." fi - -# Swift -# ----- - -# TODO: add logging to different location. - -# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects. -# Default is the common DevStack data directory. -SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift} - -# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files. -# Default is ``/etc/swift``. -SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift} - -# DevStack will create a loop-back disk formatted as XFS to store the -# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes. -# Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000} - -# The ring uses a configurable number of bits from a path’s MD5 hash as -# a partition index that designates a device. The number of bits kept -# from the hash is known as the partition power, and 2 to the partition -# power indicates the partition count. Partitioning the full MD5 hash -# ring allows other parts of the cluster to work in batches of items at -# once which ends up either more efficient or at least less complex than -# working with each item separately or the entire cluster all at once. -# By default we define 9 for the partition count (which mean 512). -SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9} - -# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be -# configured for your Swift cluster. By default the three replicas would need a -# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do -# only some quick testing. -SWIFT_REPLICAS=${SWIFT_REPLICAS:-3} - if is_service_enabled swift; then # If we are using swift3, we can default the s3 port to swift instead # of nova-objectstore @@ -563,18 +460,23 @@ read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION." # Horizon currently truncates usernames and passwords at 20 characters read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." - -# Set the tenant for service accounts in Keystone -SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} +# Keystone can now optionally install OpenLDAP by adding ldap to the list +# of enabled services in the localrc file (e.g. ENABLED_SERVICES=key,ldap). +# If OpenLDAP has already been installed but you need to clear out +# the Keystone contents of LDAP set KEYSTONE_CLEAR_LDAP to yes +# (e.g. KEYSTONE_CLEAR_LDAP=yes ) in the localrc file. To enable the +# Keystone Identity Driver (keystone.identity.backends.ldap.Identity) +# set KEYSTONE_IDENTITY_BACKEND to ldap (e.g. KEYSTONE_IDENTITY_BACKEND=ldap) +# in the localrc file. -# Horizon -# ------- +# only request ldap password if the service is enabled +if is_service_enabled ldap; then + read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" +fi -# Allow overriding the default Apache user and group, default both to -# current user. 
-APACHE_USER=${APACHE_USER:-$USER} -APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER} +# Set the tenant for service accounts in Keystone +SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} # Log files @@ -620,9 +522,9 @@ function echo_nolog() { # Set ``LOGFILE`` to turn on logging # Append '.xxxxxxxx' to the given name to maintain history # where 'xxxxxxxx' is a representation of the date the file was created +TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then LOGDAYS=${LOGDAYS:-7} - TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"} CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT") fi @@ -643,7 +545,15 @@ if [[ -n "$LOGFILE" ]]; then exec 3>&1 if [[ "$VERBOSE" == "True" ]]; then # Redirect stdout/stderr to tee to write the log file - exec 1> >( tee "${LOGFILE}" ) 2>&1 + exec 1> >( awk ' + { + cmd ="date +\"%Y-%m-%d %H:%M:%S \"" + cmd | getline now + close("date +\"%Y-%m-%d %H:%M:%S \"") + sub(/^/, now) + print + fflush() + }' | tee "${LOGFILE}" ) 2>&1 # Set up a second fd for output exec 6> >( tee "${SUMFILE}" ) else @@ -720,73 +630,18 @@ set -o xtrace # OpenStack uses a fair number of other projects. # Install package requirements +# Source it so the entire environment is available echo_summary "Installing package prerequisites" -if [[ "$os_PACKAGE" = "deb" ]]; then - install_package $(get_packages $FILES/apts) -else - install_package $(get_packages $FILES/rpms) -fi - -if [[ $SYSLOG != "False" ]]; then - install_package rsyslog-relp -fi +source $TOP_DIR/tools/install_prereqs.sh -if is_service_enabled rabbit; then - # Install rabbitmq-server - # the temp file is necessary due to LP: #878600 - tfile=$(mktemp) - install_package rabbitmq-server > "$tfile" 2>&1 - cat "$tfile" - rm -f "$tfile" -elif is_service_enabled qpid; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - install_package qpid-cpp-server-daemon - else - install_package qpidd - fi -elif is_service_enabled zeromq; then - if [[ "$os_PACKAGE" = "rpm" ]]; then - install_package zeromq python-zmq - else - install_package libzmq1 python-zmq - fi -fi +install_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then install_database fi -if is_service_enabled horizon; then - if [[ "$os_PACKAGE" = "deb" ]]; then - # Install apache2, which is NOPRIME'd - install_package apache2 libapache2-mod-wsgi - else - sudo rm -f /etc/httpd/conf.d/000-* - install_package httpd mod_wsgi - fi -fi - if is_service_enabled q-agt; then - if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then - # Install deps - # FIXME add to ``files/apts/quantum``, but don't install if not needed! - if [[ "$os_PACKAGE" = "deb" ]]; then - kernel_version=`cat /proc/version | cut -d " " -f3` - install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version - else - ### FIXME(dtroyer): Find RPMs for OpenVSwitch - echo "OpenVSwitch packages need to be located" - # Fedora does not started OVS by default - restart_service openvswitch - fi - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - install_package bridge-utils - fi -fi - -if is_service_enabled swift; then - # Install memcached for swift. 
- install_package memcached + install_quantum_agent_packages fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} @@ -802,10 +657,6 @@ if [[ $TRACK_DEPENDS = True ]] ; then $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip fi -# Install python requirements -echo_summary "Installing Python prerequisites" -pip_install $(get_packages $FILES/pips | sort -u) - # Check Out Source # ---------------- @@ -816,7 +667,6 @@ echo_summary "Installing OpenStack project source" install_keystoneclient install_glanceclient install_novaclient - # Check out the client libs that are used most git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH @@ -825,16 +675,16 @@ if is_service_enabled key g-api n-api swift; then # unified auth system (manages accounts/tokens) install_keystone fi + if is_service_enabled swift; then - # storage service - git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH - # storage service client and and Library - git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH + install_swiftclient + install_swift if is_service_enabled swift3; then # swift3 middleware to provide S3 emulation to Swift git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH fi fi + if is_service_enabled g-api n-api; then # image catalog service install_glance @@ -847,16 +697,18 @@ if is_service_enabled n-novnc; then # a websockets/html5 or flash powered VNC console for vm instances git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH fi -if is_service_enabled horizon; then - # django powered web control panel for openstack - git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG +if is_service_enabled n-spice; then + # a websockets/html5 or flash powered SPICE console for vm instances + git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH fi -if is_service_enabled quantum; then - git_clone $QUANTUM_CLIENT_REPO $QUANTUM_CLIENT_DIR $QUANTUM_CLIENT_BRANCH +if is_service_enabled horizon; then + # dashboard + install_horizon fi if is_service_enabled quantum; then - # quantum - git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH + install_quantum + install_quantumclient + install_quantum_third_party fi if is_service_enabled heat; then install_heat @@ -866,14 +718,9 @@ if is_service_enabled cinder; then install_cinder fi if is_service_enabled ceilometer; then + install_ceilometerclient install_ceilometer fi -if is_service_enabled tempest; then - install_tempest -fi -if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH -fi # Initialization @@ -890,11 +737,11 @@ if is_service_enabled key g-api n-api swift; then configure_keystone fi if is_service_enabled swift; then - setup_develop $SWIFT_DIR - setup_develop $SWIFTCLIENT_DIR -fi -if is_service_enabled swift3; then - setup_develop $SWIFT3_DIR + configure_swift + configure_swiftclient + if is_service_enabled swift3; then + setup_develop $SWIFT3_DIR + fi fi if is_service_enabled g-api n-api; then configure_glance @@ -908,11 +755,11 @@ if is_service_enabled nova; then configure_nova fi if is_service_enabled horizon; then - setup_develop $HORIZON_DIR + configure_horizon fi if is_service_enabled quantum; then - setup_develop $QUANTUM_CLIENT_DIR - setup_develop $QUANTUM_DIR + setup_quantumclient + setup_quantum fi if is_service_enabled heat; then configure_heat @@ -921,9 +768,6 @@ fi if is_service_enabled cinder; then configure_cinder fi -if is_service_enabled ryu || (is_service_enabled quantum && [[ "$Q_PLUGIN" = "ryu" ]]); then - setup_develop $RYU_DIR -fi if [[ $TRACK_DEPENDS = True ]] 
; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip @@ -934,6 +778,13 @@ if [[ $TRACK_DEPENDS = True ]] ; then exit 0 fi +if is_service_enabled tls-proxy; then + configure_CA + init_CA + init_cert + # Add name to /etc/hosts + # don't be naive and add to existing line! +fi # Syslog # ------ @@ -960,20 +811,7 @@ fi # Finalize queue installation # ---------------------------- - -if is_service_enabled rabbit; then - # Start rabbitmq-server - echo_summary "Starting RabbitMQ" - if [[ "$os_PACKAGE" = "rpm" ]]; then - # RPM doesn't start the service - restart_service rabbitmq-server - fi - # change the rabbit password since the default is "guest" - sudo rabbitmqctl change_password guest $RABBIT_PASSWORD -elif is_service_enabled qpid; then - echo_summary "Starting qpid" - restart_service qpidd -fi +restart_rpc_backend # Configure database @@ -1004,24 +842,35 @@ sleep 1 # Set a reasonable status bar screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS" +# Initialize the directory for service status check +init_service_check # Keystone # -------- if is_service_enabled key; then echo_summary "Starting Keystone" - configure_keystone init_keystone start_keystone - echo "Waiting for keystone to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then - echo "keystone did not start" - exit 1 + + # Set up a temporary admin URI for Keystone + SERVICE_ENDPOINT=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + + if is_service_enabled tls-proxy; then + export OS_CACERT=$INT_CA_DIR/ca-chain.pem + # Until the client support is fixed, just use the internal endpoint + SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi - # ``keystone_data.sh`` creates services, admin and demo users, and roles. - SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0 + # Do the keystone-specific bits from keystone_data.sh + export OS_SERVICE_TOKEN=$SERVICE_TOKEN + export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT + create_keystone_accounts + create_nova_accounts + create_cinder_accounts + create_quantum_accounts + # ``keystone_data.sh`` creates services, admin and demo users, and roles. ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \ SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \ S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \ @@ -1034,6 +883,7 @@ if is_service_enabled key; then export OS_TENANT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD + unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT fi @@ -1044,48 +894,8 @@ fi if is_service_enabled horizon; then echo_summary "Configuring and starting Horizon" - - # Remove stale session database. - rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3 - - # ``local_settings.py`` is used to override horizon default settings. - local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py - cp $FILES/horizon_settings.py $local_settings - - # Initialize the horizon database (it stores sessions and notices shown to - # users). The user system is external (keystone). 
- cd $HORIZON_DIR - python manage.py syncdb --noinput - cd $TOP_DIR - - # Create an empty directory that apache uses as docroot - sudo mkdir -p $HORIZON_DIR/.blackhole - - if [[ "$os_PACKAGE" = "deb" ]]; then - APACHE_NAME=apache2 - APACHE_CONF=sites-available/horizon - # Clean up the old config name - sudo rm -f /etc/apache2/sites-enabled/000-default - # Be a good citizen and use the distro tools here - sudo touch /etc/$APACHE_NAME/$APACHE_CONF - sudo a2ensite horizon - else - # Install httpd, which is NOPRIME'd - APACHE_NAME=httpd - APACHE_CONF=conf.d/horizon.conf - sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf - fi - - # Configure apache to run horizon - sudo sh -c "sed -e \" - s,%USER%,$APACHE_USER,g; - s,%GROUP%,$APACHE_GROUP,g; - s,%HORIZON_DIR%,$HORIZON_DIR,g; - s,%APACHE_NAME%,$APACHE_NAME,g; - s,%DEST%,$DEST,g; - \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF" - - restart_service $APACHE_NAME + init_horizon + start_horizon fi @@ -1108,363 +918,22 @@ if is_service_enabled g-reg; then fi -# Ryu -# --- - -# Ryu is not a part of OpenStack project. Please ignore following block if -# you are not interested in Ryu. -# launch ryu manager -if is_service_enabled ryu; then - RYU_CONF_DIR=/etc/ryu - if [[ ! -d $RYU_CONF_DIR ]]; then - sudo mkdir -p $RYU_CONF_DIR - fi - sudo chown `whoami` $RYU_CONF_DIR - RYU_CONF=$RYU_CONF_DIR/ryu.conf - sudo rm -rf $RYU_CONF - - cat < $RYU_CONF ---app_lists=$RYU_APPS ---wsapi_host=$RYU_API_HOST ---wsapi_port=$RYU_API_PORT ---ofp_listen_host=$RYU_OFP_HOST ---ofp_tcp_listen_port=$RYU_OFP_PORT -EOF - screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF" -fi - - # Quantum # ------- -# Quantum Network Configuration if is_service_enabled quantum; then echo_summary "Configuring Quantum" - # The following variables control the Quantum openvswitch and - # linuxbridge plugins' allocation of tenant networks and - # availability of provider networks. If these are not configured - # in localrc, tenant networks will be local to the host (with no - # remote connectivity), and no physical resources will be - # available for the allocation of provider networks. - - # To use GRE tunnels for tenant networks, set to True in - # localrc. GRE tunnels are only supported by the openvswitch - # plugin, and currently only on Ubuntu. - ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False} - - # If using GRE tunnels for tenant networks, specify the range of - # tunnel IDs from which tenant networks are allocated. Can be - # overriden in localrc in necesssary. - TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000} - - # To use VLANs for tenant networks, set to True in localrc. VLANs - # are supported by the openvswitch and linuxbridge plugins, each - # requiring additional configuration described below. - ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - - # If using VLANs for tenant networks, set in localrc to specify - # the range of VLAN VIDs from which tenant networks are - # allocated. An external network switch must be configured to - # trunk these VLANs between hosts for multi-host connectivity. - # - # Example: ``TENANT_VLAN_RANGE=1000:1999`` - TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - - # If using VLANs for tenant networks, or if using flat or VLAN - # provider networks, set in localrc to the name of the physical - # network, and also configure OVS_PHYSICAL_BRIDGE for the - # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge - # agent, as described below. 
- # - # Example: ``PHYSICAL_NETWORK=default`` - PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - - # With the openvswitch plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to - # the name of the OVS bridge to use for the physical network. The - # bridge will be created if it does not already exist, but a - # physical interface must be manually added to the bridge as a - # port for external connectivity. - # - # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` - OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - - # With the linuxbridge plugin, if using VLANs for tenant networks, - # or if using flat or VLAN provider networks, set in localrc to - # the name of the network interface to use for the physical - # network. - # - # Example: ``LB_PHYSICAL_INTERFACE=eth1`` - LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - - # With the openvswitch plugin, set to True in localrc to enable - # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. - # - # Example: ``OVS_ENABLE_TUNNELING=True`` - OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - - # Put config files in ``/etc/quantum`` for everyone to find - if [[ ! -d /etc/quantum ]]; then - sudo mkdir -p /etc/quantum - fi - sudo chown `whoami` /etc/quantum - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch - Q_PLUGIN_CONF_FILENAME=ovs_quantum_plugin.ini - Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge - Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini - Q_DB_NAME="quantum_linux_bridge" - Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ryu - Q_PLUGIN_CONF_FILENAME=ryu.ini - Q_DB_NAME="ovs_quantum" - Q_PLUGIN_CLASS="quantum.plugins.ryu.ryu_quantum_plugin.RyuQuantumPluginV2" - fi - - if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then - echo "Quantum plugin not set.. exiting" - exit 1 - fi - - # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``/etc/quantum`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - - database_connection_url dburl $Q_DB_NAME - iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl - unset dburl - - Q_CONF_FILE=/etc/quantum/quantum.conf - cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE - Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" - else - Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE" - fi - cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - - # Copy over the config and filter bits - Q_CONF_ROOTWRAP_D=/etc/quantum/rootwrap.d - mkdir -p $Q_CONF_ROOTWRAP_D - cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ -fi - -# Quantum service (for controller node) -if is_service_enabled q-svc; then - Q_API_PASTE_FILE=/etc/quantum/api-paste.ini - Q_POLICY_FILE=/etc/quantum/policy.json - - cp $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - cp $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE - - if is_service_enabled $DATABASE_BACKENDS; then - recreate_database $Q_DB_NAME utf8 - else - echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin." 
- exit 1 - fi - - # Update either configuration file with plugin - iniset $Q_CONF_FILE DEFAULT core_plugin $Q_PLUGIN_CLASS - - iniset $Q_CONF_FILE DEFAULT auth_strategy $Q_AUTH_STRATEGY - quantum_setup_keystone $Q_API_PASTE_FILE filter:authtoken - - # Configure plugin - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre - iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES - elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan - else - echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts." - fi - - # Override ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` - # for more complex physical network configurations. - if [[ "$OVS_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - OVS_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - OVS_VLAN_RANGES=$OVS_VLAN_RANGES:$TENANT_VLAN_RANGE - fi - fi - if [[ "$OVS_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES - fi - - # Enable tunnel networks if selected - if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - fi - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan - else - echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts." - fi - - # Override ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` - # for more complex physical network configurations. - if [[ "$LB_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then - LB_VLAN_RANGES=$PHYSICAL_NETWORK - if [[ "$TENANT_VLAN_RANGE" != "" ]]; then - LB_VLAN_RANGES=$LB_VLAN_RANGES:$TENANT_VLAN_RANGE - fi - fi - if [[ "$LB_VLAN_RANGES" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES - fi - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS openflow_controller $RYU_OFP_HOST:$RYU_OFP_PORT - iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT - fi -fi - -# Quantum agent (for compute nodes) -if is_service_enabled q-agt; then - # Configure agent for plugin - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - # Setup integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - quantum_setup_ovs_bridge $OVS_BRIDGE - - # Setup agent for tunneling - if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then - # Verify tunnels are supported - # REVISIT - also check kernel module support for GRE and patch ports - OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'` - if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then - echo "You are running OVS version $OVS_VERSION." - echo "OVS 1.4+ is required for tunneling between multiple hosts." - exit 1 - fi - iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True - iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP - fi - - # Setup physical network bridge mappings. Override - # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. 
- if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then - OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE - - # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE - fi - if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - # Setup physical network interface mappings. Override - # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more - # complex physical network configurations. - if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then - LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE - fi - if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS - fi - AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - # Set up integration bridge - OVS_BRIDGE=${OVS_BRIDGE:-br-int} - quantum_setup_ovs_bridge $OVS_BRIDGE - if [ -n "$RYU_INTERNAL_INTERFACE" ]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE - fi - AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py" - fi - # Update config w/rootwrap - iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" -fi - -# Quantum DHCP -if is_service_enabled q-dhcp; then - AGENT_DHCP_BINARY="$QUANTUM_DIR/bin/quantum-dhcp-agent" - - Q_DHCP_CONF_FILE=/etc/quantum/dhcp_agent.ini - - cp $QUANTUM_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE - - # Set verbose - iniset $Q_DHCP_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_DHCP_CONF_FILE DEFAULT debug True - iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_DHCP_CONF_FILE DEFAULT state_path $DATA_DIR/quantum - - quantum_setup_keystone $Q_DHCP_CONF_FILE DEFAULT set_auth_url - - # Update config w/rootwrap - iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - fi -fi - -# Quantum L3 -if is_service_enabled q-l3; then - AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent" - PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} - Q_L3_CONF_FILE=/etc/quantum/l3_agent.ini - - cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE - - # Set verbose - iniset $Q_L3_CONF_FILE DEFAULT verbose True - # Set debug - iniset $Q_L3_CONF_FILE DEFAULT debug True - - iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - - iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - - quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url - if [[ "$Q_PLUGIN" == "openvswitch" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT 
external_network_bridge $PUBLIC_BRIDGE - # Set up external bridge - quantum_setup_external_bridge $PUBLIC_BRIDGE - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge '' - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver - iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE - iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT - # Set up external bridge - quantum_setup_external_bridge $PUBLIC_BRIDGE - fi + configure_quantum + init_quantum fi -# Quantum RPC support - must be updated prior to starting any of the services +# Some Quantum plugins require network controllers which are not +# a part of the OpenStack project. Configure and start them. if is_service_enabled quantum; then - iniset $Q_CONF_FILE DEFAULT control_exchange quantum - if is_service_enabled qpid ; then - iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid - elif is_service_enabled zeromq; then - iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST - iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD - fi + configure_quantum_third_party + init_quantum_third_party + start_quantum_third_party fi @@ -1478,11 +947,18 @@ fi if is_service_enabled n-net q-dhcp; then # Delete traces of nova networks from prior runs - sudo killall dnsmasq || true - clean_iptables - rm -rf $NOVA_STATE_PATH/networks - mkdir -p $NOVA_STATE_PATH/networks + # Do not kill any dnsmasq instance spawned by NetworkManager + netman_pid=$(pidof NetworkManager || true) + if [ -z "$netman_pid" ]; then + sudo killall dnsmasq || true + else + sudo ps h -o pid,ppid -C dnsmasq | grep -v $netman_pid | awk '{print $1}' | sudo xargs kill || true + fi + clean_iptables + rm -rf ${NOVA_STATE_PATH}/networks + sudo mkdir -p ${NOVA_STATE_PATH}/networks + sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks # Force IP forwarding on, just on case sudo sysctl -w net.ipv4.ip_forward=1 fi @@ -1493,253 +969,7 @@ fi if is_service_enabled swift; then echo_summary "Configuring Swift" - - # Make sure to kill all swift processes first - swift-init all stop || true - - # First do a bit of setup by creating the directories and - # changing the permissions so we can run it as our user. - - USER_GROUP=$(id -g) - sudo mkdir -p ${SWIFT_DATA_DIR}/drives - sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR} - - # Create a loopback disk and format it to XFS. - if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then - if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo umount ${SWIFT_DATA_DIR}/drives/sdb1 - fi - else - mkdir -p ${SWIFT_DATA_DIR}/drives/images - sudo touch ${SWIFT_DATA_DIR}/drives/images/swift.img - sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img - - dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \ - bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE} - fi - - # Make a fresh XFS filesystem - mkfs.xfs -f -i size=1024 ${SWIFT_DATA_DIR}/drives/images/swift.img - - # Mount the disk with mount options to make it as efficient as possible - mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1 - if ! 
egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then - sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \ - ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1 - fi - - # Create a link to the above mount - for x in $(seq ${SWIFT_REPLICAS}); do - sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done - - # Create all of the directories needed to emulate a few different servers - for x in $(seq ${SWIFT_REPLICAS}); do - drive=${SWIFT_DATA_DIR}/drives/sdb1/${x} - node=${SWIFT_DATA_DIR}/${x}/node - node_device=${node}/sdb1 - [[ -d $node ]] && continue - [[ -d $drive ]] && continue - sudo install -o ${USER} -g $USER_GROUP -d $drive - sudo install -o ${USER} -g $USER_GROUP -d $node_device - sudo chown -R $USER: ${node} - done - - sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift - sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift - - if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then - # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. - # Create a symlink if the config dir is moved - sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift - fi - - # Swift use rsync to synchronize between all the different - # partitions (which make more sense when you have a multi-node - # setup) we configure it with our version of rsync. - sed -e " - s/%GROUP%/${USER_GROUP}/; - s/%USER%/$USER/; - s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,; - " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf - if [[ "$os_PACKAGE" = "deb" ]]; then - sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync - else - sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync - fi - - if is_service_enabled swift3;then - swift_auth_server="s3token " - fi - - # By default Swift will be installed with the tempauth middleware - # which has some default username and password if you have - # configured keystone it will checkout the directory. 
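# With the selection above, the proxy pipeline configured below is expected
# to resolve to roughly one of the following (a sketch, not literal config):
#
#   with keystone:  pipeline = catch_errors healthcheck cache ratelimit authtoken keystoneauth proxy-logging proxy-server
#   with tempauth:  pipeline = catch_errors healthcheck cache ratelimit tempauth proxy-logging proxy-server
#
# (when swift3 is enabled, "swift3 s3token" is spliced in before authtoken)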
- if is_service_enabled key; then - swift_auth_server+="authtoken keystoneauth" - else - swift_auth_server=tempauth - fi - - SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf - cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1 - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} - - # Only enable Swift3 if we have it enabled in ENABLED_SERVICES - is_service_enabled swift3 && swift3=swift3 || swift3="" - - iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server" - - iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true - - # Configure Keystone - sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER} - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/ - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD - - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use - iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" - - if is_service_enabled swift3; then - cat <>${SWIFT_CONFIG_PROXY_SERVER} -# NOTE(chmou): s3token middleware is not updated yet to use only -# username and password. 
-[filter:s3token] -paste.filter_factory = keystone.middleware.s3_token:filter_factory -auth_port = ${KEYSTONE_AUTH_PORT} -auth_host = ${KEYSTONE_AUTH_HOST} -auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} -auth_token = ${SERVICE_TOKEN} -admin_token = ${SERVICE_TOKEN} - -[filter:swift3] -use = egg:swift3#swift3 -EOF - fi - - cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf - iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH} - - # This function generates an object/account/proxy configuration - # emulating 4 nodes on different ports - function generate_swift_configuration() { - local server_type=$1 - local bind_port=$2 - local log_facility=$3 - local node_number - local swift_node_config - - for node_number in $(seq ${SWIFT_REPLICAS}); do - node_path=${SWIFT_DATA_DIR}/${node_number} - swift_node_config=${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf - - cp ${SWIFT_DIR}/etc/${server_type}-server.conf-sample ${swift_node_config} - - iniuncomment ${swift_node_config} DEFAULT user - iniset ${swift_node_config} DEFAULT user ${USER} - - iniuncomment ${swift_node_config} DEFAULT bind_port - iniset ${swift_node_config} DEFAULT bind_port ${bind_port} - - iniuncomment ${swift_node_config} DEFAULT swift_dir - iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR} - - iniuncomment ${swift_node_config} DEFAULT devices - iniset ${swift_node_config} DEFAULT devices ${node_path} - - iniuncomment ${swift_node_config} DEFAULT log_facility - iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility} - - iniuncomment ${swift_node_config} DEFAULT mount_check - iniset ${swift_node_config} DEFAULT mount_check false - - iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode - iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes - - bind_port=$(( ${bind_port} + 10 )) - log_facility=$(( ${log_facility} + 1 )) - done - } - generate_swift_configuration object 6010 2 - generate_swift_configuration container 6011 2 - generate_swift_configuration account 6012 2 - - # Specific configuration for swift for rsyslog. See - # ``/etc/rsyslog.d/10-swift.conf`` for more info. - swift_log_dir=${SWIFT_DATA_DIR}/logs - rm -rf ${swift_log_dir} - mkdir -p ${swift_log_dir}/hourly - sudo chown -R $USER:adm ${swift_log_dir} - sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ - tee /etc/rsyslog.d/10-swift.conf - restart_service rsyslog - - # This is where we create three different rings for swift with - # different object servers binding on different ports. 
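# For orientation: the positional arguments to ``swift-ring-builder ...
# create`` below are the partition power, the replica count and the minimum
# hours between moving a partition, so with SWIFT_PARTITION_POWER_SIZE=9 and
# SWIFT_REPLICAS=1 (illustrative values) the first call expands to:
#
#   swift-ring-builder object.builder create 9 1 1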
- pushd ${SWIFT_CONFIG_DIR} >/dev/null && { - - rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz - - port_number=6010 - swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder object.builder rebalance - - port_number=6011 - swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder container.builder rebalance - - port_number=6012 - swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 - for x in $(seq ${SWIFT_REPLICAS}); do - swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1 - port_number=$[port_number + 10] - done - swift-ring-builder account.builder rebalance - - } && popd >/dev/null - - # Start rsync - if [[ "$os_PACKAGE" = "deb" ]]; then - sudo /etc/init.d/rsync restart || : - else - sudo systemctl start xinetd.service - fi - - # First spawn all the swift services then kill the - # proxy service so we can run it in foreground in screen. - # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running, - # ignore it just in case - swift-init all restart || true - swift-init proxy stop || true - - unset s swift_hash swift_auth_server + init_swift fi @@ -1749,9 +979,6 @@ fi if is_service_enabled cinder; then echo_summary "Configuring Cinder" init_cinder -elif is_service_enabled n-vol; then - echo_summary "Configuring Nova volumes" - init_nvol fi if is_service_enabled nova; then @@ -1762,63 +989,52 @@ if is_service_enabled nova; then # Additional Nova configuration that is dependent on other services if is_service_enabled quantum; then - add_nova_opt "network_api_class=nova.network.quantumv2.api.API" - add_nova_opt "quantum_admin_username=$Q_ADMIN_USERNAME" - add_nova_opt "quantum_admin_password=$SERVICE_PASSWORD" - add_nova_opt "quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - add_nova_opt "quantum_auth_strategy=$Q_AUTH_STRATEGY" - add_nova_opt "quantum_admin_tenant_name=$SERVICE_TENANT_NAME" - add_nova_opt "quantum_url=http://$Q_HOST:$Q_PORT" - - if [[ "$Q_PLUGIN" = "openvswitch" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" - elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then - NOVA_VIF_DRIVER="nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver" - elif [[ "$Q_PLUGIN" = "ryu" ]]; then - NOVA_VIF_DRIVER="quantum.plugins.ryu.nova.vif.LibvirtOpenVswitchOFPRyuDriver" - add_nova_opt "libvirt_ovs_integration_bridge=$OVS_BRIDGE" - add_nova_opt "linuxnet_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - add_nova_opt "libvirt_ovs_ryu_api_host=$RYU_API_HOST:$RYU_API_PORT" - fi - add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER" - add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER" + create_nova_conf_quantum elif is_service_enabled n-net; then - add_nova_opt "network_manager=nova.network.manager.$NET_MAN" - add_nova_opt "public_interface=$PUBLIC_INTERFACE" - add_nova_opt "vlan_interface=$VLAN_INTERFACE" - add_nova_opt "flat_network_bridge=$FLAT_NETWORK_BRIDGE" - if [ -n "$FLAT_INTERFACE" ]; then - add_nova_opt "flat_interface=$FLAT_INTERFACE" - fi + create_nova_conf_nova_network fi # All 
nova-compute workers need to know the vnc configuration options # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - add_nova_opt "novncproxy_base_url=$NOVNCPROXY_URL" + iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - add_nova_opt "xvpvncproxy_base_url=$XVPVNCPROXY_URL" + iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" + SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} + iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi if [ "$VIRT_DRIVER" = 'xenserver' ]; then VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1} else VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} fi - # Address on which instance vncservers will listen on compute hosts. - # For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - add_nova_opt "vncserver_listen=$VNCSERVER_LISTEN" - add_nova_opt "vncserver_proxyclient_address=$VNCSERVER_PROXYCLIENT_ADDRESS" - add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST" - if is_service_enabled zeromq; then - add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq" - elif is_service_enabled qpid; then - add_nova_opt "rpc_backend=nova.rpc.impl_qpid" - elif [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - add_nova_opt "rabbit_host=$RABBIT_HOST" - add_nova_opt "rabbit_password=$RABBIT_PASSWORD" + + if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then + # Address on which instance vncservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF DEFAULT vnc_enabled true + iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF DEFAULT vnc_enabled false + fi + + if is_service_enabled n-spice; then + # Address on which instance spiceservers will listen on compute hosts. + # For multi-host, this should be the management ip of the compute host. + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + iniset $NOVA_CONF spice enabled true + iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" + iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + else + iniset $NOVA_CONF spice enabled false fi - add_nova_opt "glance_api_servers=$GLANCE_HOSTPORT" + + iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" + iniset_rpc_backend nova $NOVA_CONF DEFAULT + iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT" # XenServer @@ -1827,38 +1043,82 @@ if is_service_enabled nova; then if [ "$VIRT_DRIVER" = 'xenserver' ]; then echo_summary "Using XenServer virtualization driver" read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN." 
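# A minimal localrc sketch for selecting this driver; the URL and user shown
# are simply the defaults applied below, and XENAPI_PASSWORD may be set here
# as well to skip the interactive prompt:
#
#   VIRT_DRIVER=xenserver
#   XENAPI_CONNECTION_URL=http://169.254.0.1
#   XENAPI_USER=root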
- add_nova_opt "compute_driver=xenapi.XenAPIDriver" + iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver" XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"} XENAPI_USER=${XENAPI_USER:-"root"} - add_nova_opt "xenapi_connection_url=$XENAPI_CONNECTION_URL" - add_nova_opt "xenapi_connection_username=$XENAPI_USER" - add_nova_opt "xenapi_connection_password=$XENAPI_PASSWORD" - add_nova_opt "flat_injected=False" + iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL" + iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER" + iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD" + iniset $NOVA_CONF DEFAULT flat_injected "False" # Need to avoid crash due to new firewall support XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$XEN_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER" + + # OpenVZ + # ------ + elif [ "$VIRT_DRIVER" = 'openvz' ]; then echo_summary "Using OpenVZ virtualization driver" - # TODO(deva): OpenVZ driver does not yet work if compute_driver is set here. - # Replace connection_type when this is fixed. - # add_nova_opt "compute_driver=openvz.connection.OpenVzConnection" - add_nova_opt "connection_type=openvz" + iniset $NOVA_CONF DEFAULT compute_driver "openvz.driver.OpenVzDriver" + iniset $NOVA_CONF DEFAULT connection_type "openvz" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" + + # Bare Metal + # ---------- + + elif [ "$VIRT_DRIVER" = 'baremetal' ]; then + echo_summary "Using BareMetal driver" + LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"} + iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager + # NOTE(deva): ComputeCapabilitiesFilter does not currently work with Baremetal. See bug # 1129485 + # As a work around, we disable CCFilter by explicitly enabling all the other default filters. + iniset $NOVA_CONF DEFAULT scheduler_default_filters ComputeFilter,RetryFilter,AvailabilityZoneFilter,ImagePropertiesFilter + iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH + iniset $NOVA_CONF baremetal driver $BM_DRIVER + iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER + iniset $NOVA_CONF baremetal tftp_root /tftpboot + + # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``. 
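# For example, assuming a hypothetical flag name set in localrc:
#
#   EXTRA_BAREMETAL_OPTS=(use_unsafe_iscsi=True)
#
# the ${I//=/ } expansion in the loop below turns that entry into
#
#   iniset $NOVA_CONF baremetal use_unsafe_iscsi True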
+ for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do + # Attempt to convert flags to options + iniset $NOVA_CONF baremetal ${I//=/ } + done + + # Default + # ------- + else echo_summary "Using libvirt virtualization driver" - add_nova_opt "compute_driver=libvirt.LibvirtDriver" + iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver" LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"} - add_nova_opt "firewall_driver=$LIBVIRT_FIREWALL_DRIVER" + iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" fi fi +# Extra things to prepare nova for baremetal, before nova starts +if is_service_enabled nova && is_baremetal; then + echo_summary "Preparing for nova baremetal" + prepare_baremetal_toolchain + configure_baremetal_nova_dirs + if [[ "$BM_USE_FAKE_ENV" = "True" ]]; then + create_fake_baremetal_env + fi +fi # Launch Services # =============== # Only run the services specified in ``ENABLED_SERVICES`` +# Launch Swift Services +if is_service_enabled swift; then + echo_summary "Starting Swift" + start_swift +fi + # Launch the Glance services if is_service_enabled g-api g-reg; then echo_summary "Starting Glance" @@ -1872,89 +1132,43 @@ if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nov CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID) ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') - add_nova_opt "s3_access_key=$ACCESS_KEY" - add_nova_opt "s3_secret_key=$SECRET_KEY" - add_nova_opt "s3_affix_tenant=True" + iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY" + iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY" + iniset $NOVA_CONF DEFAULT s3_affix_tenant "True" fi -screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver" +screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver" # Launch the nova-api and wait for it to answer before continuing if is_service_enabled n-api; then echo_summary "Starting Nova API" - screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api" - echo "Waiting for nova-api to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then - echo "nova-api did not start" - exit 1 - fi + start_nova_api fi if is_service_enabled q-svc; then echo_summary "Starting Quantum" - # Start the Quantum service - screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" - echo "Waiting for Quantum to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:9696; do sleep 1; done"; then - echo "Quantum did not start" - exit 1 - fi - - # Configure Quantum elements - # Configure internal network & subnet - - TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1) - - # Create a small network - # Since quantum command is executed in admin context at this point, - # ``--tenant_id`` needs to be specified. 
- NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) - if is_service_enabled q-l3; then - # Create a router, and add the private subnet as one of its interfaces - ROUTER_ID=$(quantum router-create --tenant_id $TENANT_ID router1 | grep ' id ' | get_field 2) - quantum router-interface-add $ROUTER_ID $SUBNET_ID - # Create an external network, and a subnet. Configure the external network as router gw - EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2) - quantum router-gateway-set $ROUTER_ID $EXT_NET_ID - if is_quantum_ovs_base_plugin "$Q_PLUGIN" && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - CIDR_LEN=${FLOATING_RANGE#*/} - sudo ip addr add $EXT_GW_IP/$CIDR_LEN dev $PUBLIC_BRIDGE - sudo ip link set $PUBLIC_BRIDGE up - ROUTER_GW_IP=`quantum port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' '{ print $8; }'` - sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP - fi - if [[ "$Q_USE_NAMESPACE" == "False" ]]; then - # Explicitly set router id in l3 agent configuration - iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID - fi - fi + start_quantum_service_and_check + create_quantum_initial_network + setup_quantum_debug elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then # Create a small network $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS # Create some floating ips - $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK + $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME # Create a second pool $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL fi -# Start up the quantum agents if enabled -screen_it q-agt "python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE" -screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE" -screen_it q-l3 "python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE" - +if is_service_enabled quantum; then + start_quantum_agents +fi if is_service_enabled nova; then echo_summary "Starting Nova" start_nova fi -if is_service_enabled n-vol; then - echo_summary "Starting Nova volumes" - start_nvol -fi if is_service_enabled cinder; then echo_summary "Starting Cinder" start_cinder @@ -1962,11 +1176,11 @@ fi if is_service_enabled ceilometer; then echo_summary "Configuring Ceilometer" configure_ceilometer + configure_ceilometerclient echo_summary "Starting Ceilometer" + init_ceilometer start_ceilometer fi -screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log" -screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v" # Starting the nova-objectstore only if swift3 service is not enabled. # Swift will act as s3 objectstore. @@ -1983,6 +1197,17 @@ if is_service_enabled heat; then start_heat fi +# Create account rc files +# ======================= + +# Creates source able script files for easier user switching. 
+# This step also creates certificates for tenants and users,
+# which is helpful in image bundle steps.
+
+if is_service_enabled nova && is_service_enabled key; then
+    $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc
+fi
+
 # Install Images
 # ==============
@@ -1998,24 +1223,72 @@ fi
 # * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
 
 if is_service_enabled g-reg; then
-    echo_summary "Uploading images"
     TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
 
-    # Option to upload legacy ami-tty, which works with xenserver
-    if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-        IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}http://images.ansolabs.com/tty.tgz"
-    fi
+    if is_baremetal; then
+        echo_summary "Creating and uploading baremetal images"
 
-    for image_url in ${IMAGE_URLS//,/ }; do
-        upload_image $image_url $TOKEN
-    done
+        # build and upload separate deploy kernel & ramdisk
+        upload_baremetal_deploy $TOKEN
+
+        # upload images, separating out the kernel & ramdisk for PXE boot
+        for image_url in ${IMAGE_URLS//,/ }; do
+            upload_baremetal_image $image_url $TOKEN
+        done
+    else
+        echo_summary "Uploading images"
+
+        # Option to upload legacy ami-tty, which works with xenserver
+        if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
+            IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
+        fi
+
+        for image_url in ${IMAGE_URLS//,/ }; do
+            upload_image $image_url $TOKEN
+        done
+    fi
 fi
 
+# If we are running nova with baremetal driver, there are a few
+# last-mile configuration bits to attend to, which must happen
+# after n-api and n-sch have started.
+# Also, creating the baremetal flavor must happen after images
+# are loaded into glance, though just knowing the IDs is sufficient here
+if is_service_enabled nova && is_baremetal; then
+    # create special flavor for baremetal if we know what images to associate
+    [[ -n "$BM_DEPLOY_KERNEL_ID" ]] && [[ -n "$BM_DEPLOY_RAMDISK_ID" ]] && \
+        create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
+
+    # otherwise user can manually add it later by calling nova-baremetal-manage
+    [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
+
+    # NOTE: we do this here to ensure that our copy of dnsmasq is running
+    sudo pkill dnsmasq || true
+    sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
+        --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
+        --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \
+        ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
+    # ensure callback daemon is running
+    sudo pkill nova-baremetal-deploy-helper || true
+    screen_it baremetal "nova-baremetal-deploy-helper"
+fi
+
+# Save some values we generated for later use
+CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
+echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
+for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
+    SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do
+    echo $i=${!i} >>$TOP_DIR/.stackenv
+done
+
 
-# Configure Tempest last to ensure that the runtime configuration of
-# the various OpenStack services can be queried.
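# A usage sketch for the account files created above: create_userrc.sh lays
# out one directory per tenant under $TOP_DIR/accrc with a sourceable rc
# file per user (see the script's help text), so switching identities is e.g.:
#
#   source $TOP_DIR/accrc/demo/demo
#   nova list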
-if is_service_enabled tempest; then - configure_tempest +# Run extras +# ========== + +if [[ -d $TOP_DIR/extras.d ]]; then + for i in $TOP_DIR/extras.d/*.sh; do + [[ -r $i ]] && source $i stack + done fi @@ -2028,6 +1302,8 @@ if [[ -x $TOP_DIR/local.sh ]]; then $TOP_DIR/local.sh fi +# Check the status of running services +service_check # Fin # === @@ -2064,7 +1340,7 @@ fi # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled key; then - echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/" + echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/" echo "Examples on using novaclient command line is in exercise.sh" echo "The default users are: admin and demo" echo "The password: $ADMIN_PASSWORD" diff --git a/stackrc b/stackrc index 5be872ba..91f4e2b5 100644 --- a/stackrc +++ b/stackrc @@ -6,20 +6,36 @@ RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd) # Destination path for installation DEST=/opt/stack +# Destination for working data +DATA_DIR=${DEST}/data + +# Select the default database +DATABASE_TYPE=mysql + +# Determine stack user +if [[ $EUID -eq 0 ]]; then + STACK_USER=stack +else + STACK_USER=$(whoami) +fi + # Specify which services to launch. These generally correspond to # screen tabs. To change the default list, use the ``enable_service`` and # ``disable_service`` functions in ``localrc``. # For example, to enable Swift add this to ``localrc``: # enable_service swift -# -# And to disable Cinder and use Nova Volumes instead: -# disable_service c-api c-sch c-vol cinder -# enable_service n-vol -ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit +ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata +# Whether to use 'dev mode' for screen windows. Dev mode works by +# stuffing text into the screen windows so that a developer can use +# ctrl-c, up-arrow, enter to restart the service. Starting services +# this way is slightly unreliable, and a bit slower, so this can +# be disabled for automated testing by setting this value to false. 
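# For example, an automated-test localrc could carry, per the note above:
#
#   SCREEN_DEV=False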
+SCREEN_DEV=True + # Repositories # ------------ @@ -28,9 +44,13 @@ NOVA_ENABLED_APIS=ec2,osapi_compute,metadata GIT_BASE=https://github.com # metering service -CEILOMETER_REPO=https://github.com/stackforge/ceilometer.git +CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git CEILOMETER_BRANCH=master +# ceilometer client library +CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient +CEILOMETERCLIENT_BRANCH=master + # volume service CINDER_REPO=${GIT_BASE}/openstack/cinder CINDER_BRANCH=master @@ -69,6 +89,10 @@ KEYSTONE_BRANCH=master NOVNC_REPO=https://github.com/kanaka/noVNC.git NOVNC_BRANCH=master +# a websockets/html5 or flash powered SPICE console for vm instances +SPICE_REPO=http://anongit.freedesktop.org/git/spice/spice-html5.git +SPICE_BRANCH=master + # django powered web control panel for openstack HORIZON_REPO=${GIT_BASE}/openstack/horizon.git HORIZON_BRANCH=master @@ -90,25 +114,36 @@ QUANTUM_REPO=${GIT_BASE}/openstack/quantum QUANTUM_BRANCH=master # quantum client -QUANTUM_CLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient -QUANTUM_CLIENT_BRANCH=master +QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient +QUANTUMCLIENT_BRANCH=master # Tempest test suite TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git TEMPEST_BRANCH=master # heat service -HEAT_REPO=${GIT_BASE}/heat-api/heat.git +HEAT_REPO=${GIT_BASE}/openstack/heat.git HEAT_BRANCH=master # python heat client library -HEATCLIENT_REPO=${GIT_BASE}/heat-api/python-heatclient.git +HEATCLIENT_REPO=${GIT_BASE}/openstack/python-heatclient.git HEATCLIENT_BRANCH=master # ryu service RYU_REPO=https://github.com/osrg/ryu.git RYU_BRANCH=master +# diskimage-builder +BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git +BM_IMAGE_BUILD_BRANCH=master + +# bm_poseur +# Used to simulate a hardware environment for baremetal +# Only used if BM_USE_FAKE_ENV is set +BM_POSEUR_REPO=https://github.com/tripleo/bm_poseur.git +BM_POSEUR_BRANCH=master + + # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC** or **OpenVZ** based system. 
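# Since ``localrc`` is sourced after these assignments, any of them can be
# overridden there instead of editing ``stackrc``; an illustrative fragment
# (the branch and service names are examples only):
#
#   CINDER_BRANCH=stable/folsom
#   enable_service q-svc q-agt
#   disable_service n-net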
diff --git a/tests/functions.sh b/tests/functions.sh index 3a0f3199..4fe64436 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -54,6 +54,12 @@ handlers = aa, bb [bbb] handlers=ee,ff + +[ ccc ] +spaces = yes + +[ddd] +empty = EOF # Test with spaces @@ -74,6 +80,23 @@ else echo "iniget failed: $VAL" fi +# Test with spaces in section header + +VAL=$(iniget test.ini " ccc " spaces) +if [[ "$VAL" == "yes" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi + +iniset test.ini "b b" opt_ion 42 + +VAL=$(iniget test.ini "b b" opt_ion) +if [[ "$VAL" == "42" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi # Test without spaces, end of file @@ -93,6 +116,29 @@ else echo "iniget failed: $VAL" fi +# test empty option +if ini_has_option test.ini ddd empty; then + echo "OK: ddd.empty present" +else + echo "ini_has_option failed: ddd.empty not found" +fi + +# test non-empty option +if ini_has_option test.ini bbb handlers; then + echo "OK: bbb.handlers present" +else + echo "ini_has_option failed: bbb.handlers not found" +fi + +# test changing empty option +iniset test.ini ddd empty "42" + +VAL=$(iniget test.ini ddd empty) +if [[ "$VAL" == "42" ]]; then + echo "OK: $VAL" +else + echo "iniget failed: $VAL" +fi # Test section not exist @@ -112,7 +158,6 @@ else echo "iniget failed: $VAL" fi - # Test option not exist VAL=$(iniget test.ini aaa debug) @@ -122,6 +167,12 @@ else echo "iniget failed: $VAL" fi +if ! ini_has_option test.ini aaa debug; then + echo "OK aaa.debug not present" +else + echo "ini_has_option failed: aaa.debug" +fi + iniset test.ini aaa debug "999" VAL=$(iniget test.ini aaa debug) @@ -250,9 +301,11 @@ fi if [[ "$os_PACKAGE" = "deb" ]]; then is_package_installed dpkg VAL=$? -else +elif [[ "$os_PACKAGE" = "rpm" ]]; then is_package_installed rpm VAL=$? +else + VAL=1 fi if [[ "$VAL" -eq 0 ]]; then echo "OK" @@ -263,9 +316,11 @@ fi if [[ "$os_PACKAGE" = "deb" ]]; then is_package_installed dpkg bash VAL=$? -else +elif [[ "$os_PACKAGE" = "rpm" ]]; then is_package_installed rpm bash VAL=$? +else + VAL=1 fi if [[ "$VAL" -eq 0 ]]; then echo "OK" diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh index 8e2c0be9..2c455685 100755 --- a/tools/build_ramdisk.sh +++ b/tools/build_ramdisk.sh @@ -108,7 +108,7 @@ function map_nbd { echo $NBD } -# Prime image with as many apt/pips as we can +# Prime image with as many apt as we can DEV_FILE=$CACHEDIR/$DIST_NAME-dev.img DEV_FILE_TMP=`mktemp $DEV_FILE.XXXXXX` if [ ! -r $DEV_FILE ]; then @@ -121,22 +121,21 @@ if [ ! -r $DEV_FILE ]; then chroot $MNTDIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1` chroot $MNTDIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` - chroot $MNTDIR pip install `cat files/pips/*` # Create a stack user that is a member of the libvirtd group so that stack # is able to interact with libvirt. 
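# For reference, STACK_USER (which replaces the hardcoded "stack" below) is
# resolved in stackrc as:
#
#   if [[ $EUID -eq 0 ]]; then STACK_USER=stack; else STACK_USER=$(whoami); fi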
chroot $MNTDIR groupadd libvirtd - chroot $MNTDIR useradd stack -s /bin/bash -d $DEST -G libvirtd + chroot $MNTDIR useradd $STACK_USER -s /bin/bash -d $DEST -G libvirtd mkdir -p $MNTDIR/$DEST - chroot $MNTDIR chown stack $DEST + chroot $MNTDIR chown $STACK_USER $DEST # A simple password - pass - echo stack:pass | chroot $MNTDIR chpasswd + echo $STACK_USER:pass | chroot $MNTDIR chpasswd echo root:$ROOT_PASSWORD | chroot $MNTDIR chpasswd # And has sudo ability (in the future this should be limited to only what # stack requires) - echo "stack ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers + echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $MNTDIR/etc/sudoers umount $MNTDIR rmdir $MNTDIR @@ -188,7 +187,7 @@ git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH # Use this version of devstack rm -rf $MNTDIR/$DEST/devstack cp -pr $CWD $MNTDIR/$DEST/devstack -chroot $MNTDIR chown -R stack $DEST/devstack +chroot $MNTDIR chown -R $STACK_USER $DEST/devstack # Configure host network for DHCP mkdir -p $MNTDIR/etc/network @@ -226,7 +225,7 @@ EOF # Make the run.sh executable chmod 755 $RUN_SH -chroot $MNTDIR chown stack $DEST/run.sh +chroot $MNTDIR chown $STACK_USER $DEST/run.sh umount $MNTDIR rmdir $MNTDIR diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh index e72355c9..1758e7da 100755 --- a/tools/build_tempest.sh +++ b/tools/build_tempest.sh @@ -48,8 +48,6 @@ DEST=${DEST:-/opt/stack} TEMPEST_DIR=$DEST/tempest # Install tests and prerequisites -pip_install `cat $TOP_DIR/files/pips/tempest` - git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT diff --git a/tools/build_uec.sh b/tools/build_uec.sh index 48819c95..6c4a26c2 100755 --- a/tools/build_uec.sh +++ b/tools/build_uec.sh @@ -68,7 +68,7 @@ fi # Option to warm the base image with software requirements. if [ $WARM_CACHE ]; then cd $TOOLS_DIR - ./warm_apts_and_pips_for_uec.sh $image_dir/disk + ./warm_apts_for_uec.sh $image_dir/disk fi # Name of our instance, used by libvirt @@ -207,11 +207,11 @@ ROOTSLEEP=0 `cat $TOP_DIR/localrc` LOCAL_EOF fi -useradd -U -G sudo -s /bin/bash -d /opt/stack -m stack -echo stack:pass | chpasswd +useradd -U -G sudo -s /bin/bash -d /opt/stack -m $STACK_USER +echo $STACK_USER:pass | chpasswd mkdir -p /opt/stack/.ssh echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys -chown -R stack /opt/stack +chown -R $STACK_USER /opt/stack chmod 700 /opt/stack/.ssh chmod 600 /opt/stack/.ssh/authorized_keys @@ -224,7 +224,7 @@ fi # Run stack.sh cat >> $vm_dir/uec/user-data< $STAGING_DIR/etc/sudoers.d/50_stack_sh ) # Copy over your ssh keys and env if desired @@ -64,7 +67,7 @@ rm -rf $STAGING_DIR/$DEST/devstack cp_it . $STAGING_DIR/$DEST/devstack # Give stack ownership over $DEST so it may do the work needed -chroot $STAGING_DIR chown -R stack $DEST +chroot $STAGING_DIR chown -R $STACK_USER $DEST # Unmount umount $STAGING_DIR diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh new file mode 100755 index 00000000..619d63f7 --- /dev/null +++ b/tools/create_userrc.sh @@ -0,0 +1,258 @@ +#!/usr/bin/env bash + +# **create_userrc.sh** + +# Pre-create rc files and credentials for the default users. + +# Warning: This script just for development purposes + +ACCOUNT_DIR=./accrc + +display_help() +{ +cat < + +This script creates certificates and sourcable rc files per tenant/user. + +Target account directory hierarchy: +target_dir-| + |-cacert.pem + |-tenant1-name| + | |- user1 + | |- user1-cert.pem + | |- user1-pk.pem + | |- user2 + | .. + |-tenant2-name.. + .. 
+
+Optional Arguments
+-P include password to the rc files; with -A it assumes all users' passwords are the same
+-A try with all users
+-u create files just for the specified user
+-C create user and tenant, the specified tenant will be the user's tenant
+-r when combined with -C and the (-u) user exists it will be the user's tenant role in the (-C)tenant (default: Member)
+-p password for the user
+--os-username
+--os-password
+--os-tenant-name
+--os-tenant-id
+--os-auth-url
+--target-dir
+--skip-tenant
+--debug
+
+Example:
+$0 -AP
+$0 -P -C mytenant -u myuser -p mypass
+EOF
+}
+
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@")
+then
+    # parse error
+    display_help
+    exit 1
+fi
+eval set -- $options
+ADDPASS=""
+
+# The service users are usually in the service tenant.
+# rc files for service users are out of scope.
+# Supporting a different tenant for services is out of scope.
+SKIP_TENANT=",service," # tenant names are between commas(,)
+MODE=""
+ROLE=Member
+USER_NAME=""
+USER_PASS=""
+while [ $# -gt 0 ]
+do
+    case "$1" in
+    -h|--help) display_help; exit 0 ;;
+    --os-username) export OS_USERNAME=$2; shift ;;
+    --os-password) export OS_PASSWORD=$2; shift ;;
+    --os-tenant-name) export OS_TENANT_NAME=$2; shift ;;
+    --os-tenant-id) export OS_TENANT_ID=$2; shift ;;
+    --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;;
+    --os-auth-url) export OS_AUTH_URL=$2; shift ;;
+    --target-dir) ACCOUNT_DIR=$2; shift ;;
+    --debug) set -o xtrace ;;
+    -u) MODE=${MODE:-one}; USER_NAME=$2; shift ;;
+    -p) USER_PASS=$2; shift ;;
+    -A) MODE=all; ;;
+    -P) ADDPASS="yes" ;;
+    -C) MODE=create; TENANT=$2; shift ;;
+    -r) ROLE=$2; shift ;;
+    (--) shift; break ;;
+    (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;;
+    (*) echo "$0: error - unexpected argument $1" >&2; display_help; exit 1 ;;
+    esac
+    shift
+done
+
+if [ -z "$OS_PASSWORD" ]; then
+    if [ -z "$ADMIN_PASSWORD" ]; then
+        echo "The admin password is a required option!" >&2
+        exit 2
+    else
+        OS_PASSWORD=$ADMIN_PASSWORD
+    fi
+fi
+
+if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then
+    export OS_TENANT_NAME=admin
+fi
+
+if [ -z "$OS_USERNAME" ]; then
+    export OS_USERNAME=admin
+fi
+
+if [ -z "$OS_AUTH_URL" ]; then
+    export OS_AUTH_URL=http://localhost:5000/v2.0/
+fi
+
+USER_PASS=${USER_PASS:-$OS_PASSWORD}
+USER_NAME=${USER_NAME:-$OS_USERNAME}
+
+if [ -z "$MODE" ]; then
+    echo "You must specify at least the -A or -u parameter!" >&2
+    echo
+    display_help
+    exit 3
+fi
+
+export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
+
+EC2_URL=http://localhost:8773/service/Cloud
+S3_URL=http://localhost:3333
+
+ec2=`keystone endpoint-get --service ec2 | awk '/\|[[:space:]]*ec2.publicURL/ {print $4}'`
+[ -n "$ec2" ] && EC2_URL=$ec2
+
+s3=`keystone endpoint-get --service s3 | awk '/\|[[:space:]]*s3.publicURL/ {print $4}'`
+[ -n "$s3" ] && S3_URL=$s3
+
+
+mkdir -p "$ACCOUNT_DIR"
+ACCOUNT_DIR=`readlink -f "$ACCOUNT_DIR"`
+EUCALYPTUS_CERT=$ACCOUNT_DIR/cacert.pem
+mv "$EUCALYPTUS_CERT" "$EUCALYPTUS_CERT.old" &>/dev/null
+if !
nova x509-get-root-cert "$EUCALYPTUS_CERT"; then + echo "Failed to update the root certificate: $EUCALYPTUS_CERT" >&2 + mv "$EUCALYPTUS_CERT.old" "$EUCALYPTUS_CERT" &>/dev/null +fi + + +function add_entry(){ + local user_id=$1 + local user_name=$2 + local tenant_id=$3 + local tenant_name=$4 + local user_passwd=$5 + + # The admin user can see all user's secret AWS keys, it does not looks good + local line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1` + if [ -z "$line" ]; then + keystone ec2-credentials-create --user-id $user_id --tenant-id $tenant_id 1>&2 + line=`keystone ec2-credentials-list --user_id $user_id | grep -E "^\\|[[:space:]]*($tenant_name|$tenant_id)[[:space:]]*\\|" | head -n 1` + fi + local ec2_access_key ec2_secret_key + read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $4 " " $6 }'` + mkdir -p "$ACCOUNT_DIR/$tenant_name" + local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name" + # The certs subject part are the tenant ID "dash" user ID, but the CN should be the first part of the DN + # Generally the subject DN parts should be in reverse order like the Issuer + # The Serial does not seams correctly marked either + local ec2_cert="$rcfile-cert.pem" + local ec2_private_key="$rcfile-pk.pem" + # Try to preserve the original file on fail (best effort) + mv -f "$ec2_private_key" "$ec2_private_key.old" &>/dev/null + mv -f "$ec2_cert" "$ec2_cert.old" &>/dev/null + # It will not create certs when the password is incorrect + if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then + mv -f "$ec2_private_key.old" "$ec2_private_key" &>/dev/null + mv -f "$ec2_cert.old" "$ec2_cert" &>/dev/null + fi + cat >"$rcfile" <>"$rcfile" + fi +} + +#admin users expected +function create_or_get_tenant(){ + local tenant_name=$1 + local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'` + if [ -n "$tenant_id" ]; then + echo $tenant_id + else + keystone tenant-create --name "$tenant_name" | awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}' + fi +} + +function create_or_get_role(){ + local role_name=$1 + local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'` + if [ -n "$role_id" ]; then + echo $role_id + else + keystone role-create --name "$role_name" |awk '/\|[[:space:]]*id[[:space:]]*\|.*\|/ {print $4}' + fi +} + +# Provides empty string when the user does not exists +function get_user_id(){ + local user_name=$1 + keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}' +} + +if [ $MODE != "create" ]; then +# looks like I can't ask for all tenant related to a specified user + for tenant_id_at_name in `keystone tenant-list | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|$/ {print $2 "@" $4}'`; do + read tenant_id tenant_name <<< `echo "$tenant_id_at_name" | sed 's/@/ /'` + if echo $SKIP_TENANT| grep -q ",$tenant_name,"; then + continue; + fi + for user_id_at_name in `keystone user-list --tenant-id $tenant_id | awk 'BEGIN {IGNORECASE = 1} /true[[:space:]]*\|[^|]*\|$/ {print $2 "@" $4}'`; do + read user_id user_name <<< `echo "$user_id_at_name" | sed 's/@/ /'` + if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then + continue; + fi + add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + done + done +else + tenant_name=$TENANT + 
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
index 156fd439..3c62064a 100755
--- a/tools/get_uec_image.sh
+++ b/tools/get_uec_image.sh
@@ -65,7 +65,7 @@ KERNEL=$3
 case $DIST_NAME in
     quantal)    ;;
-    percise)    ;;
+    precise)    ;;
     oneiric)    ;;
     natty)      ;;
     maverick)   ;;
diff --git a/tools/info.sh b/tools/info.sh
index 5c9a1d3d..ef1f3380 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -88,17 +88,23 @@ done
 # - We are going to check packages only for the services needed.
 # - We are parsing the packages files and detecting metadatas.
 
-if [[ "$os_PACKAGE" = "deb" ]]; then
+if is_ubuntu; then
     PKG_DIR=$FILES/apts
-else
+elif is_fedora; then
     PKG_DIR=$FILES/rpms
+elif is_suse; then
+    PKG_DIR=$FILES/rpms-suse
+else
+    exit_distro_not_supported "list of packages"
 fi
 
 for p in $(get_packages $PKG_DIR); do
     if [[ "$os_PACKAGE" = "deb" ]]; then
         ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2)
-    else
+    elif [[ "$os_PACKAGE" = "rpm" ]]; then
         ver=$(rpm -q --queryformat "%{VERSION}-%{RELEASE}\n" $p)
+    else
+        exit_distro_not_supported "finding version of a package"
     fi
     echo "pkg|${p}|${ver}"
 done
@@ -107,11 +113,7 @@ done
 # Pips
 # ----
 
-if [[ "$os_PACKAGE" = "deb" ]]; then
-    CMD_PIP=/usr/bin/pip
-else
-    CMD_PIP=/usr/bin/pip-python
-fi
+CMD_PIP=$(get_pip_command)
 
 # Pip tells us what is currently installed
 FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX)
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
new file mode 100755
index 00000000..4d151db2
--- /dev/null
+++ b/tools/install_prereqs.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+# **install_prereqs.sh**
+
+# Install system package prerequisites
+#
+# install_prereqs.sh [-f]
+#
+# -f        Force an install run now
+
+if [[ -n "$1" && "$1" = "-f" ]]; then
+    FORCE_PREREQ=1
+fi
+
+# If TOP_DIR is set we're being sourced rather than running stand-alone
+# or in a sub-shell
+if [[ -z "$TOP_DIR" ]]; then
+    # Keep track of the devstack directory
+    TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+
+    # Import common functions
+    source $TOP_DIR/functions
+
+    # Determine what system we are running on. This provides ``os_VENDOR``,
+    # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+    # and ``DISTRO``
+    GetDistro
+
+    # Needed to get ``ENABLED_SERVICES``
+    source $TOP_DIR/stackrc
+
+    # Prereq dirs are here
+    FILES=$TOP_DIR/files
+fi
+
+# Minimum wait time between runs
+PREREQ_RERUN_MARKER=${PREREQ_RERUN_MARKER:-$TOP_DIR/.prereqs}
+PREREQ_RERUN_HOURS=${PREREQ_RERUN_HOURS:-2}
+PREREQ_RERUN_SECONDS=$((60*60*$PREREQ_RERUN_HOURS))
+
+NOW=$(date "+%s")
+LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0")
+DELTA=$(($NOW - $LAST_RUN))
+if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then
+    echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..."
+    return 0
+fi
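The block above implements a simple marker-file rate limit: the first line of
the marker holds the epoch seconds of the last run. The same pattern as a
standalone sketch (the marker path and interval are illustrative):

    # Skip the expensive step if it ran within the last 2 hours
    MARKER=/tmp/.last-run
    NOW=$(date +%s)
    LAST=$(head -1 "$MARKER" 2>/dev/null || echo 0)
    if [ $(($NOW - $LAST)) -lt $((2*60*60)) ]; then
        echo "Ran recently; skipping"
    else
        date +%s >"$MARKER"
        # ... do the expensive work here ...
    fi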
+
+# Make sure the proxy config is visible to sub-processes
+export_proxy_variables
+
+
+# Install Packages
+# ================
+
+# Install package requirements
+if is_ubuntu; then
+    install_package $(get_packages $FILES/apts)
+elif is_fedora; then
+    install_package $(get_packages $FILES/rpms)
+elif is_suse; then
+    install_package $(get_packages $FILES/rpms-suse)
+else
+    exit_distro_not_supported "list of packages"
+fi
+
+if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
+    if is_ubuntu || is_fedora; then
+        install_package rsyslog-relp
+    elif is_suse; then
+        install_package rsyslog-module-relp
+    else
+        exit_distro_not_supported "rsyslog-relp installation"
+    fi
+fi
+
+
+# Mark end of run
+# ---------------
+
+date "+%s" >$PREREQ_RERUN_MARKER
+date >>$PREREQ_RERUN_MARKER
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
new file mode 100755
index 00000000..cb93e57c
--- /dev/null
+++ b/tools/make_cert.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# **make_cert.sh**
+
+# Create a CA hierarchy (if necessary) and server certificate
+#
+# This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled,
+# but in the current directory unless ``DATA_DIR`` is set
+
+ENABLE_TLS=True
+DATA_DIR=${DATA_DIR:-`pwd`/ca-data}
+
+ROOT_CA_DIR=$DATA_DIR/root
+INT_CA_DIR=$DATA_DIR/int
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import TLS functions
+source lib/tls
+
+function usage {
+    echo "$0 - Create CA and/or certs"
+    echo ""
+    echo "Usage: $0 commonName [orgUnit]"
+    exit 1
+}
+
+CN=$1
+if [[ -z "$CN" ]]; then
+    usage
+fi
+ORG_UNIT_NAME=${2:-$ORG_UNIT_NAME}
+
+# Useful on OS X
+if [[ `uname -s` == 'Darwin' && -d /usr/local/Cellar/openssl ]]; then
+    # Set up for a brew-installed modern OpenSSL
+    OPENSSL_CONF=/usr/local/etc/openssl/openssl.cnf
+    OPENSSL=/usr/local/Cellar/openssl/*/bin/openssl
+fi
+
+DEVSTACK_CERT_NAME=$CN
+DEVSTACK_HOSTNAME=$CN
+DEVSTACK_CERT=$DATA_DIR/$DEVSTACK_CERT_NAME.pem
+
+# Make sure the CA is set up
+configure_CA
+init_CA
+
+# Create the server cert
+make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME
+
+# Create a cert bundle
+cat $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/cacert.pem >$DEVSTACK_CERT
+
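A quick usage sketch for ``make_cert.sh``; note that the script expects
``TOP_DIR`` in the environment and sources ``lib/tls`` relative to the current
directory, so run it from the root of the DevStack tree (the hostname and org
unit below are illustrative):

    cd ~/devstack
    TOP_DIR=$(pwd) DATA_DIR=/tmp/ca-data tools/make_cert.sh devstack.example.com DevStack
    # /tmp/ca-data/devstack.example.com.pem now bundles the key, server cert and CA chain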
diff --git a/tools/warm_apts_and_pips_for_uec.sh b/tools/warm_apts_for_uec.sh
similarity index 88%
rename from tools/warm_apts_and_pips_for_uec.sh
rename to tools/warm_apts_for_uec.sh
index fe389ffe..3c15f52e 100755
--- a/tools/warm_apts_and_pips_for_uec.sh
+++ b/tools/warm_apts_for_uec.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-# **warm_apts_and_pips_for_uec.sh**
+# **warm_apts_for_uec.sh**
 
 # Echo commands
 set -o xtrace
@@ -48,8 +48,6 @@ cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
 chroot $STAGING_DIR apt-get update
 chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
 chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
-mkdir -p $STAGING_DIR/var/cache/pip
-PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true
 
 # Unmount
 umount $STAGING_DIR
diff --git a/tools/xen/README.md b/tools/xen/README.md
index f20ad04b..1cd45cff 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -18,7 +18,7 @@ https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&produ
 For details on installation, see: http://wiki.openstack.org/XenServer/Install
 
 Here are some sample Xenserver network settings for when you are just
-getting started (I use settings like this with a lappy + cheap wifi router):
+getting started (Settings like this have been used with a laptop + cheap wifi router):
 
 * XenServer Host IP: 192.168.1.10
 * XenServer Netmask: 255.255.255.0
@@ -29,9 +29,9 @@ Step 2: Download devstack
 --------------------------
 On your XenServer host, run the following commands as root:
 
-wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master
-unzip -o master -d ./devstack
-cd devstack/*/
+    wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master
+    unzip -o master -d ./devstack
+    cd devstack/*/
 
 Step 3: Configure your localrc inside the devstack directory
 ------------------------------------------------------------
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index 9eae1903..b0fd003d 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -44,12 +44,9 @@ if [ ! -d $STAGING_DIR/etc ]; then
     exit 1
 fi
 
-# Configure dns (use same dns as dom0)
-# but only when not precise
-if [ "$UBUNTU_INST_RELEASE" != "precise" ]; then
-    cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
-elif [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then
-    echo "Configuration without DHCP not supported on Precise"
+# Only support DHCP for now; handling of resolv.conf differs between Ubuntu versions
+if [ "$MGT_IP" != "dhcp" ] && [ "$PUB_IP" != "dhcp" ]; then
+    echo "Configuration without DHCP not supported"
     exit 1
 fi
 
@@ -65,8 +62,8 @@ cd $TOP_DIR
 cat <<EOF >$STAGING_DIR/etc/rc.local
 # network restart required for getting the right gateway
 /etc/init.d/networking restart
-chown -R stack /opt/stack
-su -c "/opt/stack/run.sh > /opt/stack/run.sh.log 2>&1" stack
+chown -R $STACK_USER /opt/stack
+su -c "/opt/stack/run.sh > /opt/stack/run.sh.log" $STACK_USER
 exit 0
 EOF
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index c78c6f2e..0e275705 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -68,6 +68,19 @@ if [ ! -d $XAPI_PLUGIN_DIR ]; then
     XAPI_PLUGIN_DIR=/usr/lib/xcp/plugins/
 fi
 cp -pr ./nova/*/plugins/xenserver/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR
+
+# Install the netwrap xapi plugin to support agent control of dom0 networking
+if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then
+    if [ -d ./quantum ]; then
+        rm -rf ./quantum
+    fi
+    # Get quantum
+    QUANTUM_ZIPBALL_URL=${QUANTUM_ZIPBALL_URL:-$(echo $QUANTUM_REPO | sed "s:\.git$::;s:$:/zipball/$QUANTUM_BRANCH:g")}
+    wget $QUANTUM_ZIPBALL_URL -O quantum-zipball --no-check-certificate
+    unzip -o quantum-zipball -d ./quantum
+    cp -pr ./quantum/*/quantum/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/* $XAPI_PLUGIN_DIR
+fi
+
 chmod a+x ${XAPI_PLUGIN_DIR}*
 
 mkdir -p /boot/guest
@@ -223,6 +236,12 @@ SNAME_PREPARED="template_prepared"
 SNAME_FIRST_BOOT="before_first_boot"
 
 function wait_for_VM_to_halt() {
+    set +x
+    echo "Waiting for the VM to halt. Progress in-VM can be checked with vncviewer:"
+    mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
+    domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true)
+    port=$(xenstore-read /local/domain/$domid/console/vnc-port)
+    echo "vncviewer -via $mgmt_ip localhost:${port:2}"
     while true
     do
@@ -230,10 +249,11 @@ function wait_for_VM_to_halt() {
         state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
         if [ -n "$state" ]
         then
             break
         else
-            echo "Waiting for "$GUEST_NAME" to finish installation..."
+            echo -n "."
             sleep 20
         fi
     done
+    set -x
 }
 
 templateuuid=$(xe template-list name-label="$TNAME")
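The loop above is the usual poll-and-print-dots idiom. The same wait expressed
standalone, with a timeout added for illustration (the timeout is an addition,
not part of the script):

    # Wait for the guest to power off, giving up after an hour
    waited=0
    until [ -n "$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)" ]; do
        if [ $waited -ge 3600 ]; then
            echo "Timed out waiting for $GUEST_NAME to halt" >&2
            exit 1
        fi
        echo -n "."
        sleep 20
        waited=$((waited + 20))
    done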
@@ -376,41 +396,30 @@ if [ "$WAIT_TILL_LAUNCH" = "1" ] && [ -e ~/.ssh/id_rsa.pub ] && [ "$COPYENV" =
         sleep 10
     done
 
-    # output the run.sh.log
-    ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no stack@$DOMU_IP 'tail -f run.sh.log' &
-    TAIL_PID=$!
-
-    function kill_tail() {
-        kill -9 $TAIL_PID
-        exit 1
-    }
-    # Let Ctrl-c kill tail and exit
-    trap kill_tail SIGINT
-
-    # ensure we kill off the tail if we exit the script early
-    # for other reasons
-    add_on_exit "kill -9 $TAIL_PID || true"
-
-    # wait silently until stack.sh has finished
-    set +o xtrace
-    while ! ssh_no_check -q stack@$DOMU_IP "tail run.sh.log | grep -q 'stack.sh completed in'"; do
+    set +x
+    echo -n "Waiting for startup script to finish"
+    while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ]
+    do
         sleep 10
+        echo -n "."
     done
-    set -o xtrace
+    echo "done!"
+    set -x
 
-    # kill the tail process now stack.sh has finished
-    kill -9 $TAIL_PID
+    # output the run.sh.log
+    ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log'
 
-    # check for a failure
-    if ssh_no_check -q stack@$DOMU_IP "grep -q 'stack.sh failed' run.sh.log"; then
-        exit 1
-    fi
+    # Fail if the expected text is not found
+    ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in'
+
+    set +x
 
     echo "################################################################################"
     echo ""
     echo "All Finished!"
     echo "You can visit the OpenStack Dashboard"
     echo "at http://$DOMU_IP, and contact other services at the usual ports."
 else
+    set +x
     echo "################################################################################"
     echo ""
     echo "All Finished!"
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 4aa4554f..fe524454 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -19,6 +19,7 @@ GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
 STAGING_DIR=${STAGING_DIR:-stage}
 DO_TGZ=${DO_TGZ:-1}
 XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"}
+STACK_USER=${STACK_USER:-stack}
 
 # Install basics
 chroot $STAGING_DIR apt-get update
@@ -46,12 +47,12 @@ rm -f $STAGING_DIR/etc/localtime
 
 # Add stack user
 chroot $STAGING_DIR groupadd libvirtd
-chroot $STAGING_DIR useradd stack -s /bin/bash -d /opt/stack -G libvirtd
-echo stack:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd
-echo "stack ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
+chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
+echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd
+echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
 
 # Give ownership of /opt/stack to stack user
-chroot $STAGING_DIR chown -R stack /opt/stack
+chroot $STAGING_DIR chown -R $STACK_USER /opt/stack
 
 # Make our ip address hostnames look nice at the command prompt
 echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc
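Both ``build_xva.sh`` and ``prepare_guest.sh`` now take the service account
name from ``STACK_USER`` instead of hard-coding ``stack``, defaulting to
``stack`` when unset. A sketch of overriding it (the account name is
illustrative):

    # Set before invoking the XenServer guest build scripts
    export STACK_USER=devuser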
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
index baf9c3a2..19bd2f84 100755
--- a/tools/xen/prepare_guest_template.sh
+++ b/tools/xen/prepare_guest_template.sh
@@ -60,7 +60,7 @@ if [ -e "$ISO_DIR" ]; then
     rm -rf $TMP_DIR
 else
     echo "WARNING: no XenServer tools found, falling back to 5.6 tools"
-    TOOLS_URL="http://images.ansolabs.com/xen/xe-guest-utilities_5.6.100-651_amd64.deb"
+    TOOLS_URL="https://github.com/downloads/citrix-openstack/warehouse/xe-guest-utilities_5.6.100-651_amd64.deb"
     wget $TOOLS_URL -O $XS_TOOLS_FILE_NAME
     cp $XS_TOOLS_FILE_NAME "${STAGING_DIR}${XS_TOOLS_PATH}"
     rm -rf $XS_TOOLS_FILE_NAME
diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh
index f67547b0..43b6decd 100755
--- a/tools/xen/scripts/install_ubuntu_template.sh
+++ b/tools/xen/scripts/install_ubuntu_template.sh
@@ -45,6 +45,7 @@ fi
 # Clone built-in template to create new template
 new_uuid=$(xe vm-clone uuid=$builtin_uuid \
     new-name-label="$UBUNTU_INST_TEMPLATE_NAME")
+disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024))
 
 # Some of these settings can be found in example preseed files
 # however these need to be answered before the netinstall
@@ -73,6 +74,7 @@ xe template-param-set uuid=$new_uuid \
     PV-args="$pvargs" \
     other-config:debian-release="$UBUNTU_INST_RELEASE" \
     other-config:default_template=true \
+    other-config:disks='<provision><disk device="0" size="'$disk_size'" sr="" bootable="true" type="system"/></provision>' \
     other-config:install-arch="$UBUNTU_INST_ARCH"
 
 echo "Ubuntu template installed uuid:$new_uuid"
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 0365a25e..1a5a2a93 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -11,6 +11,7 @@ GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}
 # Size of image
 VDI_MB=${VDI_MB:-5000}
 OSDOMU_MEM_MB=1024
+OSDOMU_VDI_GB=8
 
 # VM Password
 GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
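``install_ubuntu_template.sh`` derives the template's disk size in bytes from
``OSDOMU_VDI_GB`` set in ``xenrc``. Checking the arithmetic standalone:

    OSDOMU_VDI_GB=8
    disk_size=$(($OSDOMU_VDI_GB * 1024 * 1024 * 1024))
    echo $disk_size    # prints 8589934592 (8 GiB)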
diff --git a/unstack.sh b/unstack.sh
index 6b34aa3a..a086d5c6 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -25,8 +25,11 @@ source $TOP_DIR/stackrc
 DATA_DIR=${DATA_DIR:-${DEST}/data}
 
 # Get project function libraries
+source $TOP_DIR/lib/baremetal
 source $TOP_DIR/lib/cinder
-source $TOP_DIR/lib/n-vol
+source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/quantum
 
 # Determine what system we are running on. This provides ``os_VENDOR``,
 # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
@@ -36,6 +39,20 @@ if [[ "$1" == "--all" ]]; then
     UNSTACK_ALL=${UNSTACK_ALL:-1}
 fi
 
+# Run extras
+# ==========
+
+if [[ -d $TOP_DIR/extras.d ]]; then
+    for i in $TOP_DIR/extras.d/*.sh; do
+        [[ -r $i ]] && source $i unstack
+    done
+fi
+
+if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
+    source $TOP_DIR/openrc
+    teardown_quantum_debug
+fi
+
 # Shut down devstack's screen to get the bulk of OpenStack services in one shot
 SCREEN=$(which screen)
 if [[ -n "$SCREEN" ]]; then
@@ -47,56 +64,30 @@ fi
 
 # Swift runs daemons
 if is_service_enabled swift; then
-    swift-init all stop 2>/dev/null || true
+    stop_swift
+    cleanup_swift
 fi
 
 # Apache has the WSGI processes
 if is_service_enabled horizon; then
-    stop_service apache2
+    stop_horizon
 fi
 
-SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
-
-# Get the iSCSI volumes
-if is_service_enabled cinder n-vol; then
-    if is_service_enabled n-vol; then
-        SCSI_PERSIST_DIR=$NOVA_STATE_PATH/volumes/*
-    fi
-
-    TARGETS=$(sudo tgtadm --op show --mode target)
-    if [ $? -ne 0 ]; then
-        # If tgt driver isn't running this won't work obviously
-        # So check the response and restart if need be
-        echo "tgtd seems to be in a bad state, restarting..."
-        if [[ "$os_PACKAGE" = "deb" ]]; then
-            restart_service tgt
-        else
-            restart_service tgtd
-        fi
-        TARGETS=$(sudo tgtadm --op show --mode target)
-    fi
-
-    if [[ -n "$TARGETS" ]]; then
-        iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
-        for i in "${iqn_list[@]}"; do
-            echo removing iSCSI target: $i
-            sudo tgt-admin --delete $i
-        done
-    fi
+# Kill TLS proxies
+if is_service_enabled tls-proxy; then
+    killall stud
+fi
 
-    if is_service_enabled cinder; then
-        sudo rm -rf $CINDER_STATE_PATH/volumes/*
-    fi
+# baremetal might have created a fake environment
+if is_service_enabled baremetal && [[ "$BM_USE_FAKE_ENV" = "True" ]]; then
+    cleanup_fake_baremetal_env
+fi
 
-    if is_service_enabled n-vol; then
-        sudo rm -rf $NOVA_STATE_PATH/volumes/*
-    fi
+SCSI_PERSIST_DIR=$CINDER_STATE_PATH/volumes/*
 
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-        stop_service tgt
-    else
-        stop_service tgtd
-    fi
+# Get the iSCSI volumes
+if is_service_enabled cinder; then
+    cleanup_cinder
 fi
 
 if [[ -n "$UNSTACK_ALL" ]]; then
@@ -115,8 +106,7 @@ if [[ -n "$UNSTACK_ALL" ]]; then
     fi
 fi
 
-# Quantum dhcp agent runs dnsmasq
-if is_service_enabled q-dhcp; then
-    pid=$(ps aux | awk '/[d]nsmasq.+interface=tap/ { print $2 }')
-    [ ! -z "$pid" ] && sudo kill -9 $pid
+if is_service_enabled quantum; then
+    stop_quantum
+    stop_quantum_third_party
 fi
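``unstack.sh`` now gives out-of-tree extensions a teardown hook: every readable
``extras.d/*.sh`` is sourced with the argument ``unstack``, so a hook can key
off ``$1``. A minimal hook sketch (the file name, service name and stop
function are hypothetical):

    # extras.d/80-myservice.sh
    if [[ "$1" == "unstack" ]]; then
        # Stop whatever this extra started during stack.sh
        is_service_enabled myservice && stop_myservice
    fi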