From: Dan Radez
Date: Thu, 30 Apr 2015 01:43:59 +0000 (+0000)
Subject: Merge "Create Compass build script."
X-Git-Tag: arno.2015.1.0~62
X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=commitdiff_plain;h=4266577cd0a7c0816cd3bf951522c7ad1633f02b;hp=80b8e7cfe6768f07c11fcf961f645eda8753bc22;p=genesis.git

Merge "Create Compass build script."
---
diff --git a/common/puppet-opnfv/manifests/compute.pp b/common/puppet-opnfv/manifests/compute.pp index 7bba609..0b81757 100644 --- a/common/puppet-opnfv/manifests/compute.pp +++ b/common/puppet-opnfv/manifests/compute.pp @@ -28,8 +28,6 @@ class opnfv::compute { } ##Common Parameters - if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') } - if !$rbd_secret_uuid { $rbd_secret_uuid = '3b519746-4021-4f72-957e-5b9d991723be' } if !$private_subnet { fail('private_subnet is empty')} if !$ceph_public_network { $ceph_public_network = $private_subnet } @@ -54,6 +52,7 @@ class opnfv::compute { ##HA Global params if $ha_flag { + if $private_network == '' { fail('private_network is empty') } if !$keystone_private_vip { fail('keystone_private_vip is empty') } if !$glance_private_vip { fail('glance_private_vip is empty') } if !$nova_private_vip { fail('nova_private_vip is empty') } @@ -73,8 +72,13 @@ class opnfv::compute { if !$ceph_mon_initial_members { $ceph_mon_initial_members = $controllers_hostnames_array } if !$ceph_mon_host { $ceph_mon_host = $controllers_ip_array } if !$neutron_private_vip { fail('neutron_private_vip is empty') } + + ##Find private interface + $ovs_tunnel_if = get_nic_from_network("$private_network") + } else { ##non HA params + if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') } if !$private_ip { fail('private_ip is empty') } $keystone_private_vip = $private_ip $glance_private_vip = $private_ip diff --git a/common/puppet-opnfv/manifests/controller_networker.pp b/common/puppet-opnfv/manifests/controller_networker.pp index 6888850..cff258d 100644 --- a/common/puppet-opnfv/manifests/controller_networker.pp +++ b/common/puppet-opnfv/manifests/controller_networker.pp @@ -30,7 +30,6 @@ class opnfv::controller_networker { ##Mandatory Common variables if $admin_email == '' { fail('admin_email is empty') } - if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') } ##Most users will only care about a single user/password for all services ##so lets create one variable that can be used instead of separate usernames/passwords @@ -73,7 +72,10 @@ class opnfv::controller_networker { if !$nova_admin_vip { fail('nova_admin_vip is empty') } if !$nova_private_vip { fail('nova_private_vip is empty') } if !$nova_public_vip { fail('nova_public_vip is empty') } + if $private_network == '' { fail('private_network is empty') } + ##Find private interface + $ovs_tunnel_if = get_nic_from_network("$private_network") ##Optional HA variables if !$amqp_username { $amqp_username = $single_username } @@ -102,7 +104,11 @@ class opnfv::controller_networker { if !$pcmk_server_addrs {$pcmk_server_addrs = $controllers_ip_array} if !$pcmk_server_names {$pcmk_server_names = ["pcmk-${controllers_hostnames_array[0]}", "pcmk-${controllers_hostnames_array[1]}", "pcmk-${controllers_hostnames_array[2]}"] } if !$rbd_secret_uuid { $rbd_secret_uuid = '3b519746-4021-4f72-957e-5b9d991723be' } - if !$storage_iface { $storage_iface = $ovs_tunnel_if } + if !$storage_network { + $storage_iface = $ovs_tunnel_if + } else { + $storage_iface = get_nic_from_network("$storage_network") + } ##we assume here that if not provided, the first controller is where ODL will reside ##this is fine
for now as we will replace ODL with ODL HA when it is ready @@ -273,7 +279,7 @@ class opnfv::controller_networker { } } else { - + if $ovs_tunnel_if == '' { fail('ovs_tunnel_if is empty') } if $public_ip == '' { fail('public_ip is empty') } if $private_ip == '' { fail('private_ip is empty') } diff --git a/compass/ci/deploy.sh b/compass/ci/deploy.sh old mode 100644 new mode 100755 index e69de29..fe754aa --- a/compass/ci/deploy.sh +++ b/compass/ci/deploy.sh @@ -0,0 +1,5 @@ +SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +CONF_NAME=$1 +source ${SCRIPT_DIR}/../deploy/prepare.sh || exit $? +source ${SCRIPT_DIR}/../deploy/setup-env.sh || exit $? +source ${SCRIPT_DIR}/../deploy/deploy-vm.sh || exit $? diff --git a/compass/deploy/conf/base.conf b/compass/deploy/conf/base.conf new file mode 100644 index 0000000..8362b9a --- /dev/null +++ b/compass/deploy/conf/base.conf @@ -0,0 +1,60 @@ +export COMPASS_SERVER_URL="http://10.1.0.12/api" +export COMPASS_USER_EMAIL="admin@huawei.com" +export COMPASS_USER_PASSWORD="admin" +export CLUSTER_NAME="opnfv2" +export LANGUAGE="EN" +export TIMEZONE="America/Los_Angeles" +export NTP_SERVER="10.1.0.12" +export NAMESERVERS="10.1.0.12" +export DOMAIN="ods.com" +export PARTITIONS="/home=5%,/tmp=5%,/var=20%" +export SUBNETS="10.1.0.0/24,172.16.2.0/24,172.16.3.0/24,172.16.4.0/24" +export MANAGEMENT_IP_START=${MANAGEMENT_IP_START:-'10.1.0.50'} +export TENANT_IP_START=${TENANT_IP_START:-'172.16.2.50'} +export PUBLIC_IP_START=${PUBLIC_IP_START:-'172.16.3.50'} +export STORAGE_IP_START=${STORAGE_IP_START:-'172.16.4.50'} +export MANAGEMENT_INTERFACE=${MANAGEMENT_INTERFACE:-eth0} +export TENANT_INTERFACE=${TENANT_INTERFACE:-eth1} +export STORAGE_INTERFACE=${STORAGE_INTERFACE:-eth3} +export PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-eth2} + +function next_ip { + ip_addr=$1 + ip_base="$(echo $ip_addr | cut -d. -f'1 2 3')" + ip_last="$(echo $ip_addr | cut -d. 
-f4)" + let ip_last_next=$ip_last+1 + echo "${ip_base}.${ip_last_next}" +} + +if [ -z "$HOST_NETWORKS" ]; then + IFS=, read -a HOSTNAME_LIST <<< "$HOSTNAMES" + MANAGE_IP=${MANAGEMENT_IP_START} + TENANT_IP=${TENANT_IP_START} + PUBLIC_IP=${PUBLIC_IP_START} + STORAGE_IP=${STORAGE_IP_START} + for HOSTNAME in ${HOSTNAME_LIST[@]}; do + if [ -z "$HOST_NETWORKS" ]; then + HOST_NETWORKS="${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}" + else + HOST_NETWORKS="${HOST_NETWORKS};${HOSTNAME}:${MANAGEMENT_INTERFACE}=${MANAGE_IP}|is_mgmt,${TENANT_INTERFACE}=${TENANT_IP},${PUBLIC_INTERFACE}=${PUBLIC_IP}|is_promiscuous,${STORAGE_INTERFACE}=${STORAGE_IP}" + fi + MANAGE_IP=$(next_ip ${MANAGE_IP}) + TENANT_IP=$(next_ip ${TENANT_IP}) + PUBLIC_IP=$(next_ip ${PUBLIC_IP}) + STORAGE_IP=$(next_ip ${STORAGE_IP}) + done + export HOST_NETWORKS +fi + +export NETWORK_MAPPING=${NETWORK_MAPPING:-"management=${MANAGEMENT_INTERFACE},tenant=${TENANT_INTERFACE},storage=${STORAGE_INTERFACE},external=${PUBLIC_INTERFACE}"} + +export PROXY="" +export IGNORE_PROXY="" +export SEARCH_PATH="ods.com" +export GATEWAY="10.1.0.1" +export SERVER_CREDENTIAL="root=root" +export LOCAL_REPO_URL="" +export OS_CONFIG_FILENAME="" +export SERVICE_CREDENTIALS="image:service=service,compute:service=service,dashboard:service=service,identity:service=service,metering:service=service,rabbitmq:service=service,volume:service=service,mysql:service=service" +export CONSOLE_CREDENTIALS="admin:console=console,compute:console=console,dashboard:console=console,image:console=console,metering:console=console,network:console=console,object-store:console=console,volume:console=console" +export PACKAGE_CONFIG_FILENAME="" diff --git a/compass/deploy/conf/five.conf b/compass/deploy/conf/five.conf new file mode 100644 index 0000000..e63e514 --- /dev/null +++ b/compass/deploy/conf/five.conf @@ -0,0 +1,19 @@ +export VIRT_NUMBER=5 +export VIRT_CPUS=4 +export VIRT_MEM=4096 +export VIRT_DISK=30G +export 'ADAPTER_OS_PATTERN=(?i)ubuntu-14\.04.*' +#export 'ADAPTER_OS_PATTERN=(?i)centos-6\.5.*' +export ADAPTER_NAME="openstack_juno" +export ADAPTER_TARGET_SYSTEM_PATTERN="^openstack$" +export ADAPTER_FLAVOR_PATTERN="single-controller" +export HOSTNAMES="host1,host2,host3,host4,host5" +export HOST_ROLES="host1=controller,network;host2=compute,storage;host3=compute,storage;host4=compute,storage;host5=compute,storage" +export DEFAULT_ROLES="" +export SWITCH_IPS="1.1.1.1" +export SWITCH_CREDENTIAL="version=2c,community=public" +export DEPLOYMENT_TIMEOUT="90" +export POLL_SWITCHES_FLAG="nopoll_switches" +export DASHBOARD_URL="" +export REGTEST_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) +source ${REGTEST_DIR}/base.conf diff --git a/compass/deploy/deploy-vm.sh b/compass/deploy/deploy-vm.sh new file mode 100644 index 0000000..18857cd --- /dev/null +++ b/compass/deploy/deploy-vm.sh @@ -0,0 +1,45 @@ +cd .. +rm -rf compass-core +git clone http://git.openstack.org/stackforge/compass-core -b dev/experimental +cd compass-core +virtualenv venv +source venv/bin/activate +pip install -i http://pypi.douban.com/simple -e . +if [[ ! -f /var/log/compass ]]; then + sudo mkdir /var/log/compass + sudo chown -R 777 /var/log/compass +fi +if [[ ! -f /etc/compass ]]; then + sudo mkdir /etc/compass + sudo cp -rf conf/setting /etc/compass/. 
+fi +cp bin/switch_virtualenv.py.template bin/switch_virtualenv.py +sed -i "s|\$PythonHome|$VIRTUAL_ENV|g" bin/switch_virtualenv.py +#source ../compass-install/ci/allinone.conf +bin/client.py --logfile= --loglevel=debug --logdir= --compass_server="${COMPASS_SERVER_URL}" \ +--compass_user_email="${COMPASS_USER_EMAIL}" --compass_user_password="${COMPASS_USER_PASSWORD}" \ +--cluster_name="${CLUSTER_NAME}" --language="${LANGUAGE}" --timezone="${TIMEZONE}" \ +--hostnames="${HOSTNAMES}" --partitions="${PARTITIONS}" --subnets="${SUBNETS}" \ +--adapter_os_pattern="${ADAPTER_OS_PATTERN}" --adapter_name="${ADAPTER_NAME}" \ +--adapter_target_system_pattern="${ADAPTER_TARGET_SYSTEM_PATTERN}" \ +--adapter_flavor_pattern="${ADAPTER_FLAVOR_PATTERN}" \ +--http_proxy="${PROXY}" --https_proxy="${PROXY}" --no_proxy="${IGNORE_PROXY}" \ +--ntp_server="${NTP_SERVER}" --dns_servers="${NAMESERVERS}" --domain="${DOMAIN}" \ +--search_path="${SEARCH_PATH}" --default_gateway="${GATEWAY}" \ +--server_credential="${SERVER_CREDENTIAL}" --local_repo_url="${LOCAL_REPO_URL}" \ +--os_config_json_file="${OS_CONFIG_FILENAME}" --service_credentials="${SERVICE_CREDENTIALS}" \ +--console_credentials="${CONSOLE_CREDENTIALS}" --host_networks="${HOST_NETWORKS}" \ +--network_mapping="${NETWORK_MAPPING}" --package_config_json_file="${PACKAGE_CONFIG_FILENAME}" \ +--host_roles="${HOST_ROLES}" --default_roles="${DEFAULT_ROLES}" --switch_ips="${SWITCH_IPS}" \ +--machines=${machines//\'} --switch_credential="${SWITCH_CREDENTIAL}" \ +--deployment_timeout="${DEPLOYMENT_TIMEOUT}" --${POLL_SWITCHES_FLAG} --dashboard_url="${DASHBOARD_URL}" +deploy_result=$? +tear_down_machines +cd ../compass-install +sudo vagrant destroy compass_nodocker +if [[ $deploy_result != 0 ]]; then + echo "deployment failed" + exit 1 +else + echo "deployment complete" +fi diff --git a/compass/deploy/func.sh b/compass/deploy/func.sh new file mode 100644 index 0000000..29c2c23 --- /dev/null +++ b/compass/deploy/func.sh @@ -0,0 +1,20 @@ +function tear_down_machines() { + virtmachines=$(virsh list --name |grep pxe) + for virtmachine in $virtmachines; do + echo "destroy $virtmachine" + virsh destroy $virtmachine + if [[ "$?" != "0" ]]; then + echo "destroy instance $virtmachine failed" + exit 1 + fi + done + virtmachines=$(virsh list --all --name |grep pxe) + for virtmachine in $virtmachines; do + echo "undefine $virtmachine" + virsh undefine $virtmachine + if [[ "$?" != "0" ]]; then + echo "undefine instance $virtmachine failed" + exit 1 + fi + done +} diff --git a/compass/deploy/mac_generator.sh b/compass/deploy/mac_generator.sh new file mode 100755 index 0000000..ca898cb --- /dev/null +++ b/compass/deploy/mac_generator.sh @@ -0,0 +1,23 @@ +#!/bin/bash +function mac_address_part() { + hex_number=$(printf '%02x' $RANDOM) + number_length=${#hex_number} + number_start=$(expr $number_length - 2) + echo ${hex_number:$number_start:2} +} + +function mac_address() { + echo "'00:00:$(mac_address_part):$(mac_address_part):$(mac_address_part):$(mac_address_part)'" +} + +machines='' +for i in `seq $1`; do + mac=$(mac_address) + + if [[ -z $machines ]]; then + machines="${mac}" + else + machines="${machines} ${mac}" + fi +done +echo ${machines} diff --git a/compass/deploy/prepare.sh b/compass/deploy/prepare.sh new file mode 100644 index 0000000..2086c5d --- /dev/null +++ b/compass/deploy/prepare.sh @@ -0,0 +1,35 @@ +sudo apt-get update -y +sudo apt-get install git python-pip python-dev -y +vagrant --version +if [[ $? 
!= 0 ]]; then + vagrant_pkg_url=https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb + wget ${vagrant_pkg_url} + sudo dpkg -i $(basename ${vagrant_pkg_url}) +else + echo "vagrant is already installed" +fi +sudo apt-get install libxslt-dev libxml2-dev libvirt-dev build-essential qemu-utils qemu-kvm libvirt-bin virtinst libmysqld-dev -y +sudo service libvirt-bin restart + +for plugin in vagrant-libvirt vagrant-mutate; do + vagrant plugin list |grep $plugin + if [[ $? != 0 ]]; then + vagrant plugin install $plugin --plugin-source https://ruby.taobao.org + else + echo "$plugin plugin is already installed" + fi +done +sudo pip install --upgrade ansible virtualenv +#precise_box_vb_url=https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box +#precise_box_vb_filename=$(basename ${precise_box_vb_url}) +centos65_box_vb_url=https://developer.nrel.gov/downloads/vagrant-boxes/CentOS-6.5-x86_64-v20140504.box +centos65_box_vb_filename=$(basename ${centos65_box_vb_url}) +#wget ${precise_box_vb_url} +vagrant box list |grep centos65 +if [[ $? != 0 ]]; then + wget ${centos65_box_vb_url} + mv ${centos65_box_vb_filename} centos65.box + vagrant mutate centos65.box libvirt +else + echo "centos65 box already exists" +fi diff --git a/compass/deploy/setup-env.sh b/compass/deploy/setup-env.sh new file mode 100644 index 0000000..ffa9aa5 --- /dev/null +++ b/compass/deploy/setup-env.sh @@ -0,0 +1,61 @@ +rm -rf compass-install +git clone http://git.openstack.org/stackforge/compass-install +cd compass-install + +function join { local IFS="$1"; shift; echo "$*"; } +source ${SCRIPT_DIR}/../deploy/conf/${CONF_NAME}.conf +source ${SCRIPT_DIR}/../deploy/func.sh +if [[ ! -z $VIRT_NUMBER ]]; then + mac_array=$(${SCRIPT_DIR}/../deploy/mac_generator.sh $VIRT_NUMBER) + mac_list=$(join , $mac_array) + echo "pxe_boot_macs: [${mac_list}]" >> install/group_vars/all + echo "test: true" >> install/group_vars/all +fi +virsh list |grep compass +if [[ $? == 0 ]]; then + compass_old=`virsh list |grep compass|awk '{print$2}'` + virsh destroy ${compass_old} + virsh undefine ${compass_old} +fi +sudo vagrant up compass_nodocker +if [[ $? != 0 ]]; then + echo "installation of compass failed" + sudo vagrant destroy compass_nodocker + exit 1 +fi +echo "compass is up" + +tear_down_machines +if [[ -n $mac_array ]]; then + echo "bringing up pxe boot vms" + i=0 + for mac in $mac_array; do + echo "creating vm disk for instance pxe${i}" + sudo qemu-img create -f raw /home/pxe${i}.raw ${VIRT_DISK} + sudo virt-install --accelerate --hvm --connect qemu:///system \ + --name pxe$i --ram=$VIRT_MEM --pxe --disk /home/pxe$i.raw,format=raw \ + --vcpus=$VIRT_CPUS --graphics vnc,listen=0.0.0.0 \ + --network=bridge:virbr2,mac=$mac \ + --network=bridge:virbr2 \ + --network=bridge:virbr2 \ + --network=bridge:virbr2 \ + --noautoconsole --autostart --os-type=linux --os-variant=rhel6 + if [[ $? 
!= 0 ]]; then + echo "launching pxe${i} failed" + exit 1 + fi + echo "checking pxe${i} state" + state=$(virsh domstate pxe${i}) + if [[ "$state" == "running" ]]; then + echo "pxe${i} is running" + sudo virsh destroy pxe${i} + fi + echo "add network boot option and make pxe${i} reboot if failing" + sudo sed -i "// a\ " /etc/libvirt/qemu/pxe${i}.xml + sudo sed -i "// a\ " /etc/libvirt/qemu/pxe${i}.xml + sudo virsh define /etc/libvirt/qemu/pxe${i}.xml + sudo virsh start pxe${i} + let i=i+1 + done +fi +machines=${mac_list} diff --git a/foreman/ci/deploy.sh b/foreman/ci/deploy.sh index 82fbef7..49e1590 100755 --- a/foreman/ci/deploy.sh +++ b/foreman/ci/deploy.sh @@ -280,6 +280,9 @@ for interface in ${output}; do private_subnet_mask=$subnet_mask private_short_subnet_mask=$(find_short_netmask $interface) fi + if [ "$if_counter" -eq 3 ]; then + storage_subnet_mask=$subnet_mask + fi sed -i 's/^.*eth_replace'"$if_counter"'.*$/ config.vm.network "public_network", ip: '\""$new_ip"\"', bridge: '\'"$interface"\'', netmask: '\""$subnet_mask"\"'/' Vagrantfile ((if_counter++)) done @@ -343,7 +346,6 @@ sed -i 's/^.*default_gw:.*$/default_gw:'" $defaultgw"'/' opnfv_ksgen_settings.ym ##we add biosdevname=0, net.ifnames=0 to the kickstart to use regular interface naming convention on hosts ##replace IP for parameters with next IP that will be given to controller if [ "$deployment_type" == "single_network" ]; then - sed -i 's/^.*ovs_tunnel_if:.*$/ ovs_tunnel_if: eth0/' opnfv_ksgen_settings.yml ##we also need to assign IP addresses to nodes ##for single node, foreman is managing the single network, so we can't reserve them ##not supporting single network anymore for now @@ -351,13 +353,9 @@ if [ "$deployment_type" == "single_network" ]; then exit 0 elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_network" ]]; then - sed -i 's/^.*ovs_tunnel_if:.*$/ ovs_tunnel_if: eth1/' opnfv_ksgen_settings.yml if [ "$deployment_type" == "three_network" ]; then - sed -i 's/^.*storage_iface:.*$/ storage_iface: eth1/' opnfv_ksgen_settings.yml sed -i 's/^.*network_type:.*$/network_type: three_network/' opnfv_ksgen_settings.yml - else - sed -i 's/^.*storage_iface:.*$/ storage_iface: eth3/' opnfv_ksgen_settings.yml fi ##get ip addresses for private network on controllers to make dhcp entries @@ -392,8 +390,10 @@ elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_ne fi done - ##replace public_vips + ##replace foreman site next_public_ip=${interface_ip_arr[2]} + sed -i 's/^.*foreman_url:.*$/ foreman_url:'" https:\/\/$next_public_ip"'\/api\/v2\//' opnfv_ksgen_settings.yml + ##replace public vips next_public_ip=$(increment_ip $next_public_ip 10) grep -E '*public_vip' opnfv_ksgen_settings.yml | while read -r line ; do sed -i 's/^.*'"$line"'.*$/ '"$line $next_public_ip"'/' opnfv_ksgen_settings.yml @@ -404,8 +404,19 @@ elif [[ "$deployment_type" == "multi_network" || "$deployment_type" == "three_ne fi done - ##replace private_subnet param + ##replace private_network param private_subnet=$(find_subnet $next_private_ip $private_subnet_mask) + sed -i 's/^.*private_network:.*$/ private_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + ##replace storage_network + if [ "$deployment_type" == "three_network" ]; then + sed -i 's/^.*storage_network:.*$/ storage_network:'" $private_subnet"'/' opnfv_ksgen_settings.yml + else + next_storage_ip=${interface_ip_arr[3]} + storage_subnet=$(find_subnet $next_storage_ip $storage_subnet_mask) + sed -i 's/^.*storage_network:.*$/ storage_network:'" 
$storage_subnet"'/' opnfv_ksgen_settings.yml + fi + + ##replace private_subnet param private_subnet=$private_subnet'\'$private_short_subnet_mask sed -i 's/^.*private_subnet:.*$/ private_subnet:'" $private_subnet"'/' opnfv_ksgen_settings.yml else diff --git a/foreman/docs/src/release-notes.rst b/foreman/docs/src/release-notes.rst new file mode 100644 index 0000000..cb6db30 --- /dev/null +++ b/foreman/docs/src/release-notes.rst @@ -0,0 +1,195 @@ +:Authors: Tim Rozet (trozet@redhat.com) +:Version: 0.1 + +================================================================================ +OPNFV Release Note for "Arno-RC2 release candidate" - Foreman/QuickStack@OPNFV +================================================================================ + +Abstract +======== + +This document provides the release notes for the Arno-RC2 release candidate of Foreman/QuickStack@OPNFV. + +License +======= +All Foreman/QuickStack and "common" entities are protected by the Apache License ( http://www.apache.org/licenses/ ) + +**Contents** + +1 Version history + +2 Important notes + +3 Summary + +4 Release Data + +5 Known Limitations, Issues and Workarounds + +6 Test Result + +7 References + +1 Version history =================== + ++--------------------+--------------------+--------------------+--------------------+ +| **Date** | **Ver.** | **Author** | **Comment** | +| | | | | ++--------------------+--------------------+--------------------+--------------------+ +| 2015-04-16 | 0.1.0 | Tim Rozet | First draft | +| | | | | ++--------------------+--------------------+--------------------+--------------------+ + +2 Important notes =================== + +This is the first OPNFV Arno pre-release that implements the deploy stage of the OPNFV CI pipeline. + +Carefully follow the installation instructions, which guide a user through deploying OPNFV with the Foreman/QuickStack installer. + +3 Summary =========== + +Arno Foreman/QuickStack@OPNFV is an installer capable of setting up an OPNFV target system. The current definition of an OPNFV target system is the OpenStack Juno upstream project combined with OpenDaylight Helium. The system is deployed with OpenStack High Availability (HA) for most OpenStack services. OpenDaylight is deployed in non-HA form, as HA is not available for the Arno release. Ceph storage is used as the Cinder backend, and is the only supported storage for Arno. Ceph is set up as 3 OSDs and 3 Monitors, one OSD+Mon per Controller node. + +This Arno pre-release of Foreman/QuickStack@OPNFV adds the deploy stage of the OPNFV CI pipeline: + +- Documentation is built by Jenkins +- .iso image is built by Jenkins +- Jenkins deploys a Foreman/QuickStack@OPNFV stack to bare metal, which includes 3 control+network nodes, and 2 compute nodes. + +Automatic testing of the deployed system is not part of this pre-release.
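As a quick manual sanity check of the Ceph layout described in the summary (a suggested step only, not part of this release's automation, and assuming the ceph CLI is available on a controller node), the cluster state can be inspected with::

    ceph -s        # expect 3 monitors in quorum and 3 OSDs up/in
    ceph osd tree  # expect one OSD per controller node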
+ 4 Release Data ================ ++--------------------------------------+--------------------------------------+ +| **Project** | Arno/genesis/bgs | +| | | ++--------------------------------------+--------------------------------------+ +| **Repo/tag** | genesis/arno-rc2 | +| | | ++--------------------------------------+--------------------------------------+ +| **Release designation** | Arno RC2 | +| | | ++--------------------------------------+--------------------------------------+ +| **Release date** | 2015-04-23 | +| | | ++--------------------------------------+--------------------------------------+ +| **Purpose of the delivery** | OPNFV Internal quality assurance | +| | and CI Pipeline dry-run | +| | | ++--------------------------------------+--------------------------------------+ + 4.1 Version change ------------------ + 4.1.1 Module version changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This is the first tracked version of genesis-Foreman/QuickStack. It is based on the following upstream versions: + +- OpenStack (Juno release) + +- OpenDaylight Helium-SR2 + +- CentOS 7 + +4.1.2 Document version changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This is the first tracked version of genesis-Foreman/QuickStack. It comes with the following documentation: + +- OPNFV Installation instructions for - Foreman/QuickStack@OPNFV - ver. 0.0.1 +- OPNFV Release Note for "Arno-RC2 release candidate" - Foreman/QuickStack@OPNFV - ver. 0.1 (this document) + +4.2 Reason for version ---------------------- +4.2.1 Feature additions ~~~~~~~~~~~~~~~~~~~~~~~ + ++--------------------------------------+--------------------------------------+ +| **JIRA REFERENCE** | **SLOGAN** | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: BGS-4 | OPNFV base system install | +| | using Foreman/Quickstack. | ++--------------------------------------+--------------------------------------+ + +4.2.2 Bug corrections ~~~~~~~~~~~~~~~~~~~~~ + +**JIRA TICKETS:** + ++--------------------------------------+--------------------------------------+ +| **JIRA REFERENCE** | **SLOGAN** | +| | | ++--------------------------------------+--------------------------------------+ +| | | +| | | ++--------------------------------------+--------------------------------------+ + +4.3 Deliverables ---------------- + +4.3.1 Software deliverables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- Foreman/QuickStack@OPNFV .iso file +- deploy.sh - Automatically deploys the Target OPNFV System to Bare Metal + +4.3.2 Documentation deliverables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +- OPNFV Installation instructions for - Foreman/QuickStack@OPNFV - ver. 0.0.1 +- OPNFV Release Note for "Arno-RC2 release candidate" - Foreman/QuickStack@OPNFV - ver. 0.1 (this document) + +5 Known Limitations, Issues and Workarounds ============================================ + +5.1 System Limitations ------------------------- + +**Max number of blades:** 1 Foreman/QuickStack master, 3 Controllers, 20 Compute blades + +**Min number of blades:** 1 Foreman/QuickStack master, 1 Controller, 1 Compute blade + +**Storage:** Ceph is the only supported storage configuration.
+ +**Min master requirements:** At least 2048 MB of RAM + + 5.2 Known issues ------------------- + +**JIRA TICKETS:** + ++--------------------------------------+--------------------------------------+ +| **JIRA REFERENCE** | **SLOGAN** | +| | | ++--------------------------------------+--------------------------------------+ +| JIRA: BGS-13 | bridge br-ex is not auto configured | +| | by puppet | ++--------------------------------------+--------------------------------------+ + +5.3 Workarounds ------------------ +**-** + + 6 Test Result ============== + +Foreman/QuickStack@OPNFV Arno RC2 has undergone QA test runs with the following results: + ++--------------------------------------+--------------------------------------+ +| **TEST-SUITE** | **Results:** | +| | | ++--------------------------------------+--------------------------------------+ +| **-** | **-** | ++--------------------------------------+--------------------------------------+ + + 7 References ============= + +For more information on the OPNFV Arno release, please see: + +http://wiki.opnfv.org/release/arno diff --git a/fuel/LICENCE b/fuel/LICENCE deleted file mode 100644 index 8b13789..0000000 --- a/fuel/LICENCE +++ /dev/null @@ -1 +0,0 @@ - diff --git a/fuel/LICENSE.rst b/fuel/LICENSE.rst new file mode 100644 index 0000000..9537658 --- /dev/null +++ b/fuel/LICENSE.rst @@ -0,0 +1,85 @@ +Copyright 2015 Open Platform for NFV Project, Inc. and its contributors + +Open Platform for NFV Project Software Licence ============================================== +Any software developed by the "Open Platform for NFV" Project is licenced under the +Apache License, Version 2.0 (the "License"); +you may not use the content of this software bundle except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Open Platform for NFV Project Documentation Licence =================================================== +Any documentation developed by the "Open Platform for NFV Project" +is licensed under a Creative Commons Attribution 4.0 International License. +You should have received a copy of the license along with this. If not, +see http://creativecommons.org/licenses/by/4.0/. + +Unless required by applicable law or agreed to in writing, documentation +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+ + Other applicable upstream project Licenses relevant for Fuel@OPNFV ================================================================== +You may not use the content of this software bundle except in compliance with the +Licenses as listed below: + ++----------------+-----------------------------------------------------+ +| **Component** | **Licence** | ++----------------+-----------------------------------------------------+ +| OpenStack | Apache License 2.0 | +| | https://www.apache.org/licenses/LICENSE-2.0 | ++----------------+-----------------------------------------------------+ +| OpenDaylight | Eclipse Public License 1.0 | +| | https://www.eclipse.org/legal/epl-v10.html | ++----------------+-----------------------------------------------------+ +| PostgreSQL | PostgreSQL Licence: | +| | http://opensource.org/licenses/postgresql | ++----------------+-----------------------------------------------------+ +| MongoDB | GNU AGPL v3.0. | +| | http://www.fsf.org/licensing/licenses/agpl-3.0.html | ++----------------+-----------------------------------------------------+ +| CoroSync | BSD 2-Clause | +| | http://opensource.org/licenses/bsd-license.php | ++----------------+-----------------------------------------------------+ +| Pacemaker | GPL v2 | +| | https://www.gnu.org/licenses/gpl-2.0.html | ++----------------+-----------------------------------------------------+ +| RabbitMQ | Mozilla Public License | +| | https://www.rabbitmq.com/mpl.html | ++----------------+-----------------------------------------------------+ +| Linux | GPL v2 | +| | https://www.gnu.org/licenses/gpl-2.0.html | ++----------------+-----------------------------------------------------+ +| Docker | Apache License 2.0 | +| | https://www.apache.org/licenses/LICENSE-2.0 | ++----------------+-----------------------------------------------------+ +| Fuel | Apache License 2.0 | +| | https://www.apache.org/licenses/LICENSE-2.0 | ++----------------+-----------------------------------------------------+ +| OpenJDK/JRE | GPL v2 | +| | https://www.gnu.org/licenses/gpl-2.0.html | ++----------------+-----------------------------------------------------+ +| Ceph | GPL v2 | +| | https://www.gnu.org/licenses/gpl-2.0.html | ++----------------+-----------------------------------------------------+ +| Puppet | Apache License 2.0 | +| | https://www.apache.org/licenses/LICENSE-2.0 | ++----------------+-----------------------------------------------------+ +| Cobbler | GPL v2 | +| | https://www.gnu.org/licenses/gpl-2.0.html | ++----------------+-----------------------------------------------------+ +| Nailgun | Apache License 2.0 | +| | https://www.apache.org/licenses/LICENSE-2.0 | ++----------------+-----------------------------------------------------+ +| Astute | Apache License 2.0 | +| | https://www.apache.org/licenses/LICENSE-2.0 | ++----------------+-----------------------------------------------------+ + diff --git a/fuel/TODO b/fuel/TODO index 906dfb5..7aa42d2 100644 --- a/fuel/TODO +++ b/fuel/TODO @@ -4,6 +4,7 @@ ######################################################################### The following items need to be done to achieve an OPNFV/BGS ARNO Fuel Stack: 1) Add support for CentOS 6.5 - REMAINING -2) Add Autodeployment "deploy.sh" for Jenkins -3) Dry-run Funktest (Jenkins/Robot/etc.) -4) Finalize Documentation \ No newline at end of file +2) Add Local GIT repo mirror +3) Add Auto-deployment for Linux-Foundation Lab. +4) Dry-run Funktest (Jenkins/Robot/etc.)
+5) Finalize Documentation \ No newline at end of file diff --git a/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp b/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp index 8180e3d..c5dce1b 100644 --- a/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp +++ b/fuel/build/f_ntp/puppet/modules/opnfv/manifests/ntp.pp @@ -31,6 +31,15 @@ class opnfv::ntp( $file='/etc/ntp.conf' ) { + case $::operatingsystem { + centos, redhat: { + $service_name = 'ntpd' + } + debian, ubuntu: { + $service_name = 'ntp' + } + } + if $::fuel_settings['role'] { if ($::fuel_settings['opnfv'] and $::fuel_settings['opnfv']['ntp']) { @@ -63,9 +72,9 @@ class opnfv::ntp( service { 'ntp': ensure => running, + name => $service_name, enable => true, require => [ Package['ntp'], File[$file]] } } } - diff --git a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp index 922ab41..7370169 100644 --- a/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp +++ b/fuel/build/f_odl_docker/puppet/modules/opnfv/manifests/odl_docker.pp @@ -1,44 +1,45 @@ class opnfv::odl_docker { - case $::fuel_settings['role'] { - /controller/: { - - file { "/opt": - ensure => "directory", - } - - file { "/opt/opnfv": - ensure => "directory", - owner => "root", - group => "root", - mode => 777, - } - - file { "/opt/opnfv/odl": - ensure => "directory", - } - - file { "/opt/opnfv/odl/odl_docker_image.tar": - ensure => present, - source => "/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar", - mode => 750, - } - - file { "/opt/opnfv/odl/docker-latest": - ensure => present, - source => "/etc/puppet/modules/opnfv/odl_docker/docker-latest", - mode => 750, - } - - file { "/opt/opnfv/odl/start_odl_conatiner.sh": - ensure => present, - source => "/etc/puppet/modules/opnfv/scripts/start_odl_container.sh", - mode => 750, - } - - # fix failed to find the cgroup root issue - # https://github.com/docker/docker/issues/8791 - if $::operatingsystem == 'Ubuntu' { + case $::fuel_settings['role'] { + /controller/: { + + file { '/opt': + ensure => 'directory', + } + + file { '/opt/opnfv': + ensure => 'directory', + owner => 'root', + group => 'root', + mode => 777, + } + + file { '/opt/opnfv/odl': + ensure => 'directory', + } + + file { '/opt/opnfv/odl/odl_docker_image.tar': + ensure => present, + source => '/etc/puppet/modules/opnfv/odl_docker/odl_docker_image.tar', + mode => 750, + } + + file { '/opt/opnfv/odl/docker-latest': + ensure => present, + source => '/etc/puppet/modules/opnfv/odl_docker/docker-latest', + mode => 750, + } + + file { '/opt/opnfv/odl/start_odl_conatiner.sh': + ensure => present, + source => '/etc/puppet/modules/opnfv/scripts/start_odl_container.sh', + mode => 750, + } + + # fix failed to find the cgroup root issue + # https://github.com/docker/docker/issues/8791 + case $::operatingsystem { + 'ubuntu': { package {'cgroup-lite': ensure => present, } @@ -49,6 +50,12 @@ class opnfv::odl_docker require => Package['cgroup-lite'], } } + 'centos': { + package {'docker-io': + ensure => latest, + } + } + } + } } - } } diff --git a/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp b/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp index be4e67d..44f36a2 100644 --- a/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp +++ b/fuel/build/f_resolvconf/puppet/modules/opnfv/manifests/resolver.pp @@ -48,16 +48,26 @@ class opnfv::resolver() mode => '0644', content => 
template('opnfv/resolv.conf.erb'), } -# /etc/resolv.conf is re-generated at each boot by resolvconf, so we -# need to store there as well. - file { '/etc/resolvconf/resolv.conf.d/head': + + # /etc/resolv.conf is re-generated at each boot by resolvconf, so we + # need to store there as well. + + case $::operatingsystem { + 'ubuntu': { + file { '/etc/resolvconf/resolv.conf.d/head': owner => root, group => root, mode => '0644', content => template('opnfv/resolv.conf.erb'), + } + } + 'centos': { + exec { 'for file in ifcfg-eth*; do grep -q -F "PEERDNS=" $file || echo "PEERDNS=no" >> $file; done ': + provider => 'shell', + cwd => '/etc/sysconfig/network-scripts', + } + } } } } } - - diff --git a/fuel/prototypes/libvirt/README.rst b/fuel/deploy/README.rst similarity index 89% rename from fuel/prototypes/libvirt/README.rst rename to fuel/deploy/README.rst index e0ceb6f..f7b5711 100644 --- a/fuel/prototypes/libvirt/README.rst +++ b/fuel/deploy/README.rst @@ -15,18 +15,22 @@ instead. Pre-condition 1: The host needs to be Ubuntu 14.x Pre-condition 2: Necessary packages installed by running -genesis/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh +sudo genesis/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh Pre-condition 3: Example VM configuration deployed by running genesis/fuel/prototypes/libvirt/setup_vms/apply_setup.sh The VMs and networks to be set up are in genesis/fuel/prototypes/libvirt/examples: "vms" and "networks" +sudo mkdir /mnt/images +cd setup-vms +sudo ./apply_setup.sh /mnt/images 50 In order to run the automated install, it's just a matter of running genesis/fuel/prototypes/libvirt/deploy.sh [] The dea file is optional; if not specified, the example one in genesis/fuel/prototypes/libvirt/examples/libvirt_dea.yaml will be used. +sudo ./deploy.sh ~/ISO/opnfv-P0000.iso ~/DEPLOY/deploy/dea.yaml Now either this will succeed (return code 0) or fail.
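For example, a calling job can gate on that return code (a minimal sketch, reusing the example ISO and dea.yaml paths shown above)::

    sudo ./deploy.sh ~/ISO/opnfv-P0000.iso ~/DEPLOY/deploy/dea.yaml
    if [ $? -ne 0 ]; then
        echo "automated install failed"
        exit 1
    fi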
I'll have a three hours safety catch to kill off things if something is hanging, diff --git a/fuel/deploy/cloud_deploy/__init__.py b/fuel/deploy/cloud_deploy/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ b/fuel/deploy/cloud_deploy/__init__.py @@ -0,0 +1 @@ +__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/cloud/__init__.py b/fuel/deploy/cloud_deploy/cloud/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/__init__.py @@ -0,0 +1 @@ +__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/cloud/common.py b/fuel/deploy/cloud_deploy/cloud/common.py new file mode 100644 index 0000000..365f6fb --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/common.py @@ -0,0 +1,51 @@ +import subprocess +import sys +import os +import logging + +N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5, + 'roles': 6, 'pending_roles': 7, 'online': 8} +E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4, + 'changes': 5, 'pending_release_id': 6} +R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4} +RO = {'name': 0, 'conflicts': 1} + +LOG = logging.getLogger(__name__) +LOG.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(message)s') +out_handler = logging.StreamHandler(sys.stdout) +out_handler.setFormatter(formatter) +LOG.addHandler(out_handler) +out_handler = logging.FileHandler('autodeploy.log', mode='w') +out_handler.setFormatter(formatter) +LOG.addHandler(out_handler) + +def exec_cmd(cmd): + process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + shell=True) + return process.communicate()[0], process.returncode + +def run_proc(cmd): + process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + shell=True) + return process + +def parse(printout, *args): + parsed_list = [] + lines = printout[0].splitlines() + for l in lines[2:]: + parsed = [e.strip() for e in l.split('|')] + parsed_list.append(parsed) + return parsed_list + +def err(error_message): + LOG.error(error_message) + sys.exit(1) + +def check_file_exists(file_path): + if not os.path.isfile(file_path): + err('ERROR: File %s not found\n' % file_path) diff --git a/fuel/deploy/cloud_deploy/cloud/configure_environment.py b/fuel/deploy/cloud_deploy/cloud/configure_environment.py new file mode 100644 index 0000000..426bbd1 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_environment.py @@ -0,0 +1,74 @@ +import common +import os +import shutil + +from configure_settings import ConfigureSettings +from configure_network import ConfigureNetwork +from configure_nodes import ConfigureNodes + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +LOG = common.LOG + +class ConfigureEnvironment(object): + + def __init__(self, dea, yaml_config_dir, release_id, node_id_roles_dict): + self.env_id = None + self.dea = dea + self.yaml_config_dir = yaml_config_dir + self.env_name = dea.get_environment_name() + self.release_id = release_id + self.node_id_roles_dict = node_id_roles_dict + self.required_networks = [] + + def env_exists(self, env_name): + env_list = parse(exec_cmd('fuel env --list')) + for env in env_list: + if env[E['name']] == env_name and env[E['status']] == 'new': + self.env_id = env[E['id']] + return True + return False + + def configure_environment(self): + LOG.debug('Configure environment\n') + if os.path.exists(self.yaml_config_dir): + LOG.debug('Deleting 
existing config directory %s\n' + % self.yaml_config_dir) + shutil.rmtree(self.yaml_config_dir) + LOG.debug('Creating new config directory %s\n' % self.yaml_config_dir) + os.makedirs(self.yaml_config_dir) + + LOG.debug('Creating environment %s release %s, mode ha, network-mode ' + 'neutron, net-segment-type vlan\n' + % (self.env_name, self.release_id)) + exec_cmd('fuel env create --name %s --release %s --mode ha ' + '--network-mode neutron --net-segment-type vlan' + % (self.env_name, self.release_id)) + + if not self.env_exists(self.env_name): + err("Failed to create environment %s\n" % self.env_name) + self.config_settings() + self.config_network() + self.config_nodes() + + def config_settings(self): + settings = ConfigureSettings(self.yaml_config_dir, self.env_id, + self.dea) + settings.config_settings() + + def config_network(self): + network = ConfigureNetwork(self.yaml_config_dir, self.env_id, self.dea) + network.config_network() + + def config_nodes(self): + nodes = ConfigureNodes(self.yaml_config_dir, self.env_id, + self.node_id_roles_dict, self.dea) + nodes.config_nodes() + + + diff --git a/fuel/deploy/cloud_deploy/cloud/configure_network.py b/fuel/deploy/cloud_deploy/cloud/configure_network.py new file mode 100644 index 0000000..f4d6f87 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_network.py @@ -0,0 +1,62 @@ +import common +import yaml +import io + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + +class ConfigureNetwork(object): + + def __init__(self, yaml_config_dir, env_id, dea): + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.dea = dea + self.required_networks = [] + + def download_network_config(self): + LOG.debug('Download network config for environment %s\n' % self.env_id) + exec_cmd('fuel network --env %s --download --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def upload_network_config(self): + LOG.debug('Upload network config for environment %s\n' % self.env_id) + exec_cmd('fuel network --env %s --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def config_network(self): + LOG.debug('Configure network\n') + self.download_network_config() + self.modify_network_config() + self.upload_network_config() + + def modify_network_config(self): + LOG.debug('Modify network config for environment %s\n' % self.env_id) + network_yaml = (self.yaml_config_dir + '/network_%s.yaml' + % self.env_id) + check_file_exists(network_yaml) + + network_config = self.dea.get_networks() + + + with io.open(network_yaml) as stream: + network = yaml.load(stream) + + net_names = self.dea.get_network_names() + net_id = {} + for net in network['networks']: + if net['name'] in net_names: + net_id[net['name']] = {'id': net['id'], + 'group_id': net['group_id']} + + for network in network_config['networks']: + network.update(net_id[network['name']]) + + with io.open(network_yaml, 'w') as stream: + yaml.dump(network_config, stream, default_flow_style=False) \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/configure_nodes.py b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py new file mode 100644 index 0000000..a5e24a8 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py @@ -0,0 +1,108 @@ +import common +import yaml +import io +import glob + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err 
+check_file_exists = common.check_file_exists +LOG = common.LOG + + +class ConfigureNodes(object): + + def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea): + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.node_id_roles_dict = node_id_roles_dict + self.dea = dea + + def config_nodes(self): + LOG.debug('Configure nodes\n') + for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems(): + exec_cmd('fuel node set --node-id %s --role %s --env %s' + % (node_id, ','.join(roles_shelf_blade[0]), self.env_id)) + + self.download_deployment_config() + self.modify_node_network_schemes() + self.upload_deployment_config() + + for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems(): + self.download_interface_config(node_id) + self.modify_node_interface(node_id) + self.upload_interface_config(node_id) + + def modify_node_network_schemes(self): + LOG.debug('Modify node network schemes in environment %s\n' % self.env_id) + for node_file in glob.glob('%s/deployment_%s/*.yaml' + % (self.yaml_config_dir, self.env_id)): + check_file_exists(node_file) + + if 'compute' in node_file: + node_type = 'compute' + else: + node_type = 'controller' + + network_scheme = self.dea.get_network_scheme(node_type) + + with io.open(node_file) as stream: + node = yaml.load(stream) + + node['network_scheme']['transformations'] = network_scheme + + with io.open(node_file, 'w') as stream: + yaml.dump(node, stream, default_flow_style=False) + + + def download_deployment_config(self): + LOG.debug('Download deployment config for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel deployment --env %s --default --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def upload_deployment_config(self): + LOG.debug('Upload deployment config for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel deployment --env %s --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def download_interface_config(self, node_id): + LOG.debug('Download interface config for node %s\n' % node_id) + r, c = exec_cmd('fuel node --env %s --node %s --network --download ' + '--dir %s' % (self.env_id, node_id, + self.yaml_config_dir)) + + def upload_interface_config(self, node_id): + LOG.debug('Upload interface config for node %s\n' % node_id) + r, c = exec_cmd('fuel node --env %s --node %s --network --upload ' + '--dir %s' % (self.env_id, node_id, + self.yaml_config_dir)) + + def modify_node_interface(self, node_id): + LOG.debug('Modify interface config for node %s\n' % node_id) + interface_yaml = (self.yaml_config_dir + '/node_%s/interfaces.yaml' + % node_id) + + with io.open(interface_yaml) as stream: + interfaces = yaml.load(stream) + + net_name_id = {} + for interface in interfaces: + for network in interface['assigned_networks']: + net_name_id[network['name']] = network['id'] + + interface_config = self.dea.get_interfaces() + + for interface in interfaces: + interface['assigned_networks'] = [] + for net_name in interface_config[interface['name']]: + net = {} + net['id'] = net_name_id[net_name] + net['name'] = net_name + interface['assigned_networks'].append(net) + + with io.open(interface_yaml, 'w') as stream: + yaml.dump(interfaces, stream, default_flow_style=False) \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/configure_settings.py b/fuel/deploy/cloud_deploy/cloud/configure_settings.py new file mode 100644 index 0000000..3a3e4d5 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_settings.py @@ -0,0 +1,47 @@ +import common +import yaml +import 
io + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + +class ConfigureSettings(object): + + def __init__(self, yaml_config_dir, env_id, dea): + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.dea = dea + + def download_settings(self): + LOG.debug('Download settings for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel settings --env %s --download --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def upload_settings(self): + LOG.debug('Upload settings for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel settings --env %s --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def config_settings(self): + LOG.debug('Configure settings\n') + self.download_settings() + self.modify_settings() + self.upload_settings() + + def modify_settings(self): + LOG.debug('Modify settings for environment %s\n' % self.env_id) + settings_yaml = (self.yaml_config_dir + '/settings_%s.yaml' + % self.env_id) + check_file_exists(settings_yaml) + + settings = self.dea.get_settings() + + with io.open(settings_yaml, 'w') as stream: + yaml.dump(settings, stream, default_flow_style=False) diff --git a/fuel/deploy/dea.py b/fuel/deploy/cloud_deploy/cloud/dea.py similarity index 52% rename from fuel/deploy/dea.py rename to fuel/deploy/cloud_deploy/cloud/dea.py index 5f306a2..295636a 100644 --- a/fuel/deploy/dea.py +++ b/fuel/deploy/cloud_deploy/cloud/dea.py @@ -7,13 +7,15 @@ class DeploymentEnvironmentAdapter(object): self.blade_ids_per_shelves = {} self.blades_per_shelves = {} self.shelf_ids = [] - self.networks = {} + self.info_per_shelves = {} + self.network_names = [] def parse_yaml(self, yaml_path): with io.open(yaml_path) as yaml_file: self.dea_struct = yaml.load(yaml_file) self.collect_shelf_and_blade_info() - self.collect_network_info() + self.collect_shelf_info() + self.collect_network_names() def get_no_of_blades(self): no_of_blades = 0 @@ -21,14 +23,16 @@ class DeploymentEnvironmentAdapter(object): no_of_blades += len(shelf['blade']) return no_of_blades - def get_server_type(self): - return self.dea_struct['server']['type'] + def collect_shelf_info(self): + self.info_per_shelves = {} + for shelf in self.dea_struct['shelf']: + self.info_per_shelves[shelf['id']] = shelf - def get_server_info(self): - return (self.dea_struct['server']['type'], - self.dea_struct['server']['mgmt_ip'], - self.dea_struct['server']['username'], - self.dea_struct['server']['password']) + def get_shelf_info(self, shelf): + return (self.info_per_shelves[shelf]['type'], + self.info_per_shelves[shelf]['mgmt_ip'], + self.info_per_shelves[shelf]['username'], + self.info_per_shelves[shelf]['password']) def get_environment_name(self): return self.dea_struct['name'] @@ -54,19 +58,29 @@ class DeploymentEnvironmentAdapter(object): blade_ids.append(blade['id']) blades[blade['id']] = blade - def is_controller(self, shelf_id, blade_id): - blade = self.blades[shelf_id][blade_id] - return (True if 'role' in blade and blade['role'] == 'controller' + def has_role(self, role, shelf, blade): + blade = self.blades_per_shelves[shelf][blade] + if role == 'compute': + return True if 'roles' not in blade else False + return (True if 'roles' in blade and role in blade['roles'] else False) - def is_compute_host(self, shelf_id, blade_id): - blade = self.blades[shelf_id][blade_id] - return True if 'role' not in blade else False - - def collect_network_info(self): - 
self.networks = {} - for network in self.dea_struct['network']: - self.networks[network['name']] = network + def collect_network_names(self): + self.network_names = [] + for network in self.dea_struct['networks']['networks']: + self.network_names.append(network['name']) def get_networks(self): - return self.networks \ No newline at end of file + return self.dea_struct['networks'] + + def get_network_names(self): + return self.network_names + + def get_settings(self): + return self.dea_struct['settings'] + + def get_network_scheme(self, node_type): + return self.dea_struct[node_type] + + def get_interfaces(self): + return self.dea_struct['interfaces'] \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/deploy.py b/fuel/deploy/cloud_deploy/cloud/deploy.py new file mode 100644 index 0000000..ea33f8b --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/deploy.py @@ -0,0 +1,208 @@ +import time +import yaml +import io +import os + +import common +from dea import DeploymentEnvironmentAdapter +from configure_environment import ConfigureEnvironment +from deployment import Deployment + +SUPPORTED_RELEASE = 'Juno on CentOS 6.5' + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + +class Deploy(object): + + def __init__(self, yaml_config_dir): + self.supported_release = None + self.yaml_config_dir = yaml_config_dir + self.macs_per_shelf_dict = {} + self.node_ids_dict = {} + self.node_id_roles_dict = {} + self.env_id = None + self.shelf_blades_dict = {} + + def cleanup_fuel_environments(self, env_list): + WAIT_LOOP = 60 + SLEEP_TIME = 10 + for env in env_list: + LOG.debug('Deleting environment %s\n' % env[E['id']]) + exec_cmd('fuel env --env %s --delete' % env[E['id']]) + all_env_erased = False + for i in range(WAIT_LOOP): + env_list = parse(exec_cmd('fuel env list')) + if env_list[0][0]: + time.sleep(SLEEP_TIME) + else: + all_env_erased = True + break + if not all_env_erased: + err('Could not erase these environments %s' + % [(env[E['id']], env[E['status']]) for env in env_list]) + + def cleanup_fuel_nodes(self, node_list): + for node in node_list: + if node[N['status']] == 'discover': + LOG.debug('Deleting node %s\n' % node[N['id']]) + exec_cmd('fuel node --node-id %s --delete-from-db' + % node[N['id']]) + exec_cmd('cobbler system remove --name node-%s' + % node[N['id']]) + + def check_previous_installation(self): + LOG.debug('Check previous installation\n') + env_list = parse(exec_cmd('fuel env list')) + if env_list[0][0]: + self.cleanup_fuel_environments(env_list) + node_list = parse(exec_cmd('fuel node list')) + if node_list[0][0]: + self.cleanup_fuel_nodes(node_list) + + def check_supported_release(self): + LOG.debug('Check supported release: %s\n' % SUPPORTED_RELEASE) + release_list = parse(exec_cmd('fuel release -l')) + for release in release_list: + if release[R['name']] == SUPPORTED_RELEASE: + self.supported_release = release + break + if not self.supported_release: + err('This Fuel does not contain the following ' + 'release: %s\n' % SUPPORTED_RELEASE) + + def check_prerequisites(self): + LOG.debug('Check prerequisites\n') + self.check_supported_release() + self.check_previous_installation() + + def find_mac_in_dict(self, mac): + for shelf, blade_dict in self.macs_per_shelf_dict.iteritems(): + for blade, mac_list in blade_dict.iteritems(): + if mac in mac_list: + return shelf, blade + + def all_blades_discovered(self): + for shelf, 
blade_dict in self.node_ids_dict.iteritems(): + for blade, node_id in blade_dict.iteritems(): + if not node_id: + return False + return True + + def not_discovered_blades_summary(self): + summary = '' + for shelf, blade_dict in self.node_ids_dict.iteritems(): + for blade, node_id in blade_dict.iteritems(): + if not node_id: + summary += '[shelf %s, blade %s]\n' % (shelf, blade) + return summary + + def collect_blade_ids_per_shelves(self, dea): + self.shelf_blades_dict = dea.get_blade_ids_per_shelves() + + def node_discovery(self, node_list, discovered_macs): + for node in node_list: + if (node[N['status']] == 'discover' and + node[N['online']] == 'True' and + node[N['mac']] not in discovered_macs): + discovered_macs.append(node[N['mac']]) + shelf_blade = self.find_mac_in_dict(node[N['mac']]) + if shelf_blade: + self.node_ids_dict[shelf_blade[0]][shelf_blade[1]] = \ + node[N['id']] + + def discovery_waiting_loop(self, discovered_macs): + WAIT_LOOP = 180 + SLEEP_TIME = 10 + all_discovered = False + for i in range(WAIT_LOOP): + node_list = parse(exec_cmd('fuel node list')) + if node_list[0][0]: + self.node_discovery(node_list, discovered_macs) + if self.all_blades_discovered(): + all_discovered = True + break + else: + time.sleep(SLEEP_TIME) + return all_discovered + + def wait_for_discovered_blades(self): + LOG.debug('Wait for discovered blades\n') + discovered_macs = [] + for shelf, blade_list in self.shelf_blades_dict.iteritems(): + self.node_ids_dict[shelf] = {} + for blade in blade_list: + self.node_ids_dict[shelf][blade] = None + all_discovered = self.discovery_waiting_loop(discovered_macs) + if not all_discovered: + err('Not all blades have been discovered: %s\n' + % self.not_discovered_blades_summary()) + + def get_mac_addresses(self, macs_yaml): + with io.open(macs_yaml, 'r') as stream: + self.macs_per_shelf_dict = yaml.load(stream) + + def assign_roles_to_cluster_node_ids(self, dea): + self.node_id_roles_dict = {} + for shelf, blades_dict in self.node_ids_dict.iteritems(): + for blade, node_id in blades_dict.iteritems(): + role_list = [] + if dea.has_role('controller', shelf, blade): + role_list.extend(['controller', 'mongo']) + if dea.has_role('cinder', shelf, blade): + role_list.extend(['cinder']) + elif dea.has_role('compute', shelf, blade): + role_list.extend(['compute']) + self.node_id_roles_dict[node_id] = (role_list, shelf, blade) + + def configure_environment(self, dea): + config_env = ConfigureEnvironment(dea, self.yaml_config_dir, + self.supported_release[R['id']], + self.node_id_roles_dict) + config_env.configure_environment() + self.env_id = config_env.env_id + + def deploy(self, dea): + dep = Deployment(dea, self.yaml_config_dir, self.env_id, + self.node_id_roles_dict) + dep.deploy() + + +def main(): + + base_dir = os.path.dirname(os.path.realpath(__file__)) + dea_yaml = base_dir + '/dea.yaml' + check_file_exists(dea_yaml) + macs_yaml = base_dir + '/macs.yaml' + check_file_exists(macs_yaml) + + yaml_config_dir = '/var/lib/opnfv/pre_deploy' + + deploy = Deploy(yaml_config_dir) + dea = DeploymentEnvironmentAdapter() + dea.parse_yaml(dea_yaml) + + deploy.get_mac_addresses(macs_yaml) + + deploy.collect_blade_ids_per_shelves(dea) + + deploy.check_prerequisites() + + deploy.wait_for_discovered_blades() + + deploy.assign_roles_to_cluster_node_ids(dea) + + deploy.configure_environment(dea) + + deploy.deploy(dea) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/deployment.py 
b/fuel/deploy/cloud_deploy/cloud/deployment.py new file mode 100644 index 0000000..831059b --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/deployment.py @@ -0,0 +1,100 @@ +import common +import os +import shutil +import glob +import yaml +import io +import time + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +run_proc = common.run_proc +parse = common.parse +err = common.err +LOG = common.LOG + + +class Deployment(object): + + def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict): + self.dea = dea + self.env_name = dea.get_environment_name() + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.node_id_roles_dict = node_id_roles_dict + self.node_id_list = [] + for node_id in self.node_id_roles_dict.iterkeys(): + self.node_id_list.append(node_id) + self.node_id_list.sort() + + def download_deployment_info(self): + LOG.debug('Download deployment info for environment %s\n' % self.env_id) + deployment_dir = self.yaml_config_dir + '/deployment_%s' % self.env_id + if os.path.exists(deployment_dir): + shutil.rmtree(deployment_dir) + r, c = exec_cmd('fuel --env %s deployment --default --dir %s' + % (self.env_id, self.yaml_config_dir)) + if c > 0: + err('Error: Could not download deployment info for env %s,' + ' reason: %s\n' % (self.env_id, r)) + + def upload_deployment_info(self): + LOG.debug('Upload deployment info for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel --env %s deployment --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + if c > 0: + err('Error: Could not upload deployment info for env %s,' + ' reason: %s\n' % (self.env_id, r)) + + def pre_deploy(self): + LOG.debug('Running pre-deploy on environment %s\n' % self.env_name) + self.download_deployment_info() + opnfv = {'opnfv': {}} + + for node_file in glob.glob('%s/deployment_%s/*.yaml' + % (self.yaml_config_dir, self.env_id)): + with io.open(node_file) as stream: + node = yaml.load(stream) + + if 'opnfv' not in node: + node.update(opnfv) + + with io.open(node_file, 'w') as stream: + yaml.dump(node, stream, default_flow_style=False) + self.upload_deployment_info() + + + def deploy(self): + WAIT_LOOP = 180 + SLEEP_TIME = 60 + + self.pre_deploy() + + log_file = 'cloud.log' + + LOG.debug('Starting deployment of environment %s\n' % self.env_name) + run_proc('fuel --env %s deploy-changes | strings | tee %s' + % (self.env_id, log_file)) + + ready = False + for i in range(WAIT_LOOP): + env = parse(exec_cmd('fuel env --env %s' % self.env_id)) + LOG.debug('Environment status: %s\n' % env[0][E['status']]) + r, _ = exec_cmd('tail -2 %s | head -1' % log_file) + if r: + LOG.debug('%s\n' % r) + if env[0][E['status']] == 'operational': + ready = True + break + else: + time.sleep(SLEEP_TIME) + exec_cmd('rm %s' % log_file) + + if ready: + LOG.debug('Environment %s successfully deployed\n' % self.env_name) + else: + err('Deployment failed, environment %s is not operational\n' + % self.env_name) diff --git a/fuel/deploy/cloud_deploy/cloud_deploy.py b/fuel/deploy/cloud_deploy/cloud_deploy.py new file mode 100644 index 0000000..4197519 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud_deploy.py @@ -0,0 +1,117 @@ +import os +import io +import yaml + +from cloud import common +from cloud.dea import DeploymentEnvironmentAdapter +from hardware_adapters.dha import DeploymentHardwareAdapter +from ssh_client import SSHClient + +exec_cmd = common.exec_cmd +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + +class CloudDeploy(object): + 
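+    # Runs on the jump host: copies the deployer and its YAML input to the
+    # Fuel master over scp, drives blade power and boot order through the
+    # hardware adapters, then launches deploy.py remotely on the master.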
+    def __init__(self, fuel_ip, fuel_username, fuel_password):
+        self.fuel_ip = fuel_ip
+        self.fuel_username = fuel_username
+        self.fuel_password = fuel_password
+        self.shelf_blades_dict = {}
+        self.macs_per_shelf_dict = {}
+
+    def copy_to_fuel_master(self, dir_path=None, file_path=None, target='~'):
+        path = None
+        if dir_path:
+            path = '-r ' + dir_path
+        elif file_path:
+            path = file_path
+        if path:
+            LOG.debug('Copying %s to Fuel Master %s' % (path, target))
+            exec_cmd('sshpass -p %s scp -o UserKnownHostsFile=/dev/null'
+                     ' -o StrictHostKeyChecking=no -o ConnectTimeout=15'
+                     ' %s %s@%s:%s'
+                     % (self.fuel_password, path, self.fuel_username,
+                        self.fuel_ip, target))
+
+    def run_cloud_deploy(self, deploy_dir, deploy_app):
+        LOG.debug('START CLOUD DEPLOYMENT')
+        ssh = SSHClient(self.fuel_ip, self.fuel_username, self.fuel_password)
+        ssh.open()
+        ssh.run('python %s/%s' % (deploy_dir, deploy_app))
+        ssh.close()
+
+    def power_off_blades(self, dea):
+        for shelf, blade_list in self.shelf_blades_dict.iteritems():
+            shelf_type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+            dha = DeploymentHardwareAdapter(shelf_type, mgmt_ip,
+                                            username, password)
+            dha.power_off_blades(shelf, blade_list)
+
+    def power_on_blades(self, dea):
+        for shelf, blade_list in self.shelf_blades_dict.iteritems():
+            shelf_type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+            dha = DeploymentHardwareAdapter(shelf_type, mgmt_ip,
+                                            username, password)
+            dha.power_on_blades(shelf, blade_list)
+
+    def set_boot_order(self, dea):
+        for shelf, blade_list in self.shelf_blades_dict.iteritems():
+            shelf_type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+            dha = DeploymentHardwareAdapter(shelf_type, mgmt_ip,
+                                            username, password)
+            dha.set_boot_order_blades(shelf, blade_list)
+
+    def get_mac_addresses(self, dea, macs_yaml):
+        self.macs_per_shelf_dict = {}
+        for shelf, blade_list in self.shelf_blades_dict.iteritems():
+            shelf_type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
+            dha = DeploymentHardwareAdapter(shelf_type, mgmt_ip,
+                                            username, password)
+            self.macs_per_shelf_dict[shelf] = dha.get_blades_mac_addresses(
+                shelf, blade_list)
+
+        with io.open(macs_yaml, 'w') as stream:
+            yaml.dump(self.macs_per_shelf_dict, stream,
+                      default_flow_style=False)
+
+    def collect_blade_ids_per_shelves(self, dea):
+        self.shelf_blades_dict = dea.get_blade_ids_per_shelves()
+
+
+def main():
+
+    fuel_ip = '10.20.0.2'
+    fuel_username = 'root'
+    fuel_password = 'r00tme'
+    deploy_dir = '~/cloud'
+
+    cloud = CloudDeploy(fuel_ip, fuel_username, fuel_password)
+
+    base_dir = os.path.dirname(os.path.realpath(__file__))
+    deployment_dir = base_dir + '/cloud'
+    macs_yaml = base_dir + '/macs.yaml'
+    dea_yaml = base_dir + '/dea.yaml'
+    check_file_exists(dea_yaml)
+
+    cloud.copy_to_fuel_master(dir_path=deployment_dir)
+    cloud.copy_to_fuel_master(file_path=dea_yaml, target=deploy_dir)
+
+    dea = DeploymentEnvironmentAdapter()
+    dea.parse_yaml(dea_yaml)
+
+    cloud.collect_blade_ids_per_shelves(dea)
+
+    cloud.power_off_blades(dea)
+
+    cloud.set_boot_order(dea)
+
+    cloud.power_on_blades(dea)
+
+    cloud.get_mac_addresses(dea, macs_yaml)
+    check_file_exists(macs_yaml)
+
+    cloud.copy_to_fuel_master(file_path=macs_yaml, target=deploy_dir)
+
+    cloud.run_cloud_deploy(deploy_dir, 'deploy.py')
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
new file mode 100644
index 0000000..c274feb
--- /dev/null
+++ b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
@@ -0,0
+1 @@ +__author__ = 'eszicse' diff --git a/fuel/deploy/dha.py b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py similarity index 94% rename from fuel/deploy/dha.py rename to fuel/deploy/cloud_deploy/hardware_adapters/dha.py index 87ac6e2..2764aeb 100644 --- a/fuel/deploy/dha.py +++ b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py @@ -1,4 +1,5 @@ -from hardware_adapters.hp.hp_adapter import HpAdapter +from hp.hp_adapter import HpAdapter +from libvirt.libvirt_adapter import LibvirtAdapter class DeploymentHardwareAdapter(object): def __new__(cls, server_type, *args): @@ -55,8 +56,6 @@ class EsxiAdapter(HardwareAdapter): def get_blade_mac_addresses(self, shelf, blade): return self.environment[shelf][blade]['mac'] -class LibvirtAdapter(HardwareAdapter): - pass class DellAdapter(HardwareAdapter): pass diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py @@ -0,0 +1 @@ +__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py new file mode 100644 index 0000000..930d234 --- /dev/null +++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py @@ -0,0 +1,288 @@ +import re +import time +from netaddr import EUI, mac_unix +from cloud import common +from ssh_client import SSHClient + +LOG = common.LOG +err = common.err + +S = {'bay': 0, 'ilo_name': 1, 'ilo_ip': 2, 'status': 3, 'power': 4, + 'uid_partner': 5} + +class HpAdapter(object): + + def __init__(self, mgmt_ip, username, password): + self.mgmt_ip = mgmt_ip + self.username = username + self.password = password + + class mac_dhcp(mac_unix): + word_fmt = '%.2x' + + def next_ip(self): + digit_list = self.mgmt_ip.split('.') + digit_list[3] = str(int(digit_list[3]) + 1) + self.mgmt_ip = '.'.join(digit_list) + + def connect(self): + verified_ips = [self.mgmt_ip] + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + try: + ssh.open() + except Exception: + self.next_ip() + verified_ips.append(self.mgmt_ip) + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + try: + ssh.open() + except Exception as e: + err('Could not connect to HP Onboard Administrator through ' + 'these IPs: %s, reason: %s' % (verified_ips, e)) + + lines = self.clean_lines(ssh.execute('show oa status')) + for line in lines: + if 'Role: Standby' in line: + ssh.close() + if self.mgmt_ip != verified_ips[0]: + err('Can only talk to OA %s which is the standby OA\n' + % self.mgmt_ip) + else: + LOG.debug('%s is the standby OA, trying next OA\n' + % self.mgmt_ip) + self.next_ip() + verified_ips.append(self.mgmt_ip) + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + try: + ssh.open() + except Exception as e: + err('Could not connect to HP Onboard Administrator' + ' through these IPs: %s, reason: %s' + % (verified_ips, e)) + + elif 'Role: Active' in line: + return ssh + err('Could not reach Active OA through these IPs %s' % verified_ips) + + def get_blades_mac_addresses(self, shelf, blade_list): + macs_per_blade_dict = {} + LOG.debug('Getting MAC addresses for shelf %s, blades %s' + % (shelf, blade_list)) + ssh = self.connect() + for blade in blade_list: + lines = self.clean_lines( + ssh.execute('show server info %s' % blade)) + left, right = self.find_mac(lines, shelf, blade) + + left = EUI(left, dialect=self.mac_dhcp) + right = EUI(right, 
dialect=self.mac_dhcp)
+            macs_per_blade_dict[blade] = [str(left), str(right)]
+        ssh.close()
+        return macs_per_blade_dict
+
+    def find_mac(self, printout, shelf, blade):
+        left = False
+        right = False
+        for line in printout:
+            if ('No Server Blade Installed' in line or
+                'Invalid Arguments' in line):
+                err('Blade %d in shelf %d does not exist' % (blade, shelf))
+
+            seobj = re.search(r'LOM1:1-a\s+([0-9A-F:]+)', line, re.I)
+            if seobj:
+                left = seobj.group(1)
+            else:
+                seobj = re.search(r'LOM1:2-a\s+([0-9A-F:]+)', line, re.I)
+                if seobj:
+                    right = seobj.group(1)
+            if left and right:
+                return left, right
+
+    def get_hardware_info(self, shelf, blade=None):
+        ssh = self.connect()
+        if ssh and not blade:
+            ssh.close()
+            return 'HP'
+
+        lines = self.clean_lines(ssh.execute('show server info %s' % blade))
+        ssh.close()
+
+        match = r'Product Name:\s+(.+)\Z'
+        if not re.search(match, str(lines[:])):
+            LOG.debug('Blade %s in shelf %s does not exist\n' % (blade, shelf))
+            return False
+
+        for line in lines:
+            seobj = re.search(match, line)
+            if seobj:
+                return 'HP %s' % seobj.group(1)
+        return False
+
+    def power_off_blades(self, shelf, blade_list):
+        return self.set_state(shelf, 'locked', blade_list)
+
+    def power_on_blades(self, shelf, blade_list):
+        return self.set_state(shelf, 'unlocked', blade_list)
+
+    def parse(self, lines):
+        parsed_list = []
+        for l in lines[5:-2]:
+            parsed = []
+            cluttered = [e.strip() for e in l.split(' ')]
+            for p in cluttered:
+                if p:
+                    parsed.append(p)
+            parsed_list.append(parsed)
+        return parsed_list
+
+    def set_state(self, shelf, state, blade_list):
+        if state not in ['locked', 'unlocked']:
+            LOG.debug('Incorrect state: %s' % state)
+            return None
+
+        LOG.debug('Setting state %s for blades %s in shelf %s'
+                  % (state, blade_list, shelf))
+
+        blade_list = sorted(blade_list)
+        ssh = self.connect()
+
+        LOG.debug('Check if blades are present')
+        server_list = self.parse(
+            self.clean_lines(ssh.execute('show server list')))
+
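+        # A parsed row from 'show server list' looks like, e.g. (hypothetical
+        # values): ['1', 'bay1-ilo', '10.1.0.5', 'OK', 'On', 'No'], indexed
+        # via the S column map; drop bays the OA reports as Absent first.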
+        for blade in blade_list[:]:
+            for server in server_list:
+                if (server[S['bay']] == blade and
+                        server[S['status']] == 'Absent'):
+                    LOG.debug('Blade %s in shelf %s is missing. '
+                              'Set state %s not performed\n'
+                              % (blade, shelf, state))
+                    blade_list.remove(blade)
+
+        bladelist = ','.join(blade_list)
+
+        # Use leading upper case on On/Off so it can be reused in match
+        force = ''
+        if state == 'locked':
+            powerstate = 'Off'
+            force = 'force'
+        else:
+            powerstate = 'On'
+        cmd = 'power%s server %s' % (powerstate, bladelist)
+        if force:
+            cmd += ' %s' % force
+
+        LOG.debug(cmd)
+        ssh.execute(cmd)
+
+        # Check that all blades reach the state which can take some time,
+        # so re-try a couple of times
+        LOG.debug('Check if state %s successfully set' % state)
+
+        WAIT_LOOP = 2
+        SLEEP_TIME = 3
+
+        set_blades = []
+
+        for i in range(WAIT_LOOP):
+            server_list = self.parse(
+                self.clean_lines(ssh.execute('show server list')))
+
+            for blade in blade_list:
+                for server in server_list:
+                    if (server[S['bay']] == blade and
+                            server[S['power']] == powerstate):
+                        set_blades.append(blade)
+                        break
+
+            all_set = set(blade_list) == set(set_blades)
+            if all_set:
+                break
+            else:
+                time.sleep(SLEEP_TIME)
+
+        ssh.close()
+
+        if all_set:
+            LOG.debug('State %s successfully set on blades %s in shelf %s'
+                      % (state, set_blades, shelf))
+            return True
+        else:
+            LOG.debug('Could not set state %s on blades %s in shelf %s\n'
+                      % (state, set(blade_list) - set(set_blades), shelf))
+            return False
+
+    def clean_lines(self, printout):
+        lines = []
+        for p in [l.strip() for l in printout.splitlines()]:
+            if p:
+                lines.append(p)
+        return lines
+
+    def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
+
+        # Assumed default when no order is given, mirroring the libvirt
+        # adapter: PXE first, then local disk
+        if not boot_dev_list:
+            boot_dev_list = ['pxe', 'hdd']
+
+        boot_dict = {'Hard Drive': 'hdd',
+                     'PXE NIC': 'pxe',
+                     'CD-ROM': 'cd',
+                     'USB': 'usb',
+                     'Diskette Driver': 'disk'}
+
+        boot_options = [b for b in boot_dict.itervalues()]
+        diff = list(set(boot_dev_list) - set(boot_options))
+        if diff:
+            err('The following boot options %s are not valid' % diff)
+
+        blade_list = sorted(blade_list)
+        LOG.debug('Setting boot order %s for blades %s in shelf %s'
+                  % (boot_dev_list, blade_list, shelf))
+
+        ssh = self.connect()
+
+        LOG.debug('Check if blades are present')
+        server_list = self.parse(
+            self.clean_lines(ssh.execute('show server list')))
+
+        for blade in blade_list[:]:
+            for server in server_list:
+                if (server[S['bay']] == blade and
+                        server[S['status']] == 'Absent'):
+                    LOG.debug('Blade %s in shelf %s is missing. 
' + 'Change boot order %s not performed.\n' + % (blade, shelf, boot_dev_list)) + blade_list.remove(blade) + + bladelist = ','.join(blade_list) + + for boot_dev in reversed(boot_dev_list): + ssh.execute('set server boot first %s %s' % (boot_dev, bladelist)) + + LOG.debug('Check if boot order is successfully set') + + success_list = [] + boot_keys = [b for b in boot_dict.iterkeys()] + for blade in blade_list: + lines = self.clean_lines(ssh.execute('show server boot %s' + % blade)) + boot_order = lines[lines.index('IPL Devices (Boot Order):')+1:] + boot_list = [] + success = False + for b in boot_order: + for k in boot_keys: + if k in b: + boot_list.append(boot_dict[k]) + break + if boot_list == boot_dev_list: + success = True + break + + success_list.append(success) + if success: + LOG.debug('Boot order %s successfully set on blade %s in ' + 'shelf %s\n' % (boot_dev_list, blade, shelf)) + else: + LOG.debug('Failed to set boot order %s on blade %s in ' + 'shelf %s\n' % (boot_dev_list, blade, shelf)) + + ssh.close() + return all(success_list) diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py @@ -0,0 +1 @@ +__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py new file mode 100644 index 0000000..d332e59 --- /dev/null +++ b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py @@ -0,0 +1,153 @@ +from lxml import etree +from cloud import common +from ssh_client import SSHClient + +exec_cmd = common.exec_cmd +err = common.err +LOG = common.LOG + + +class LibvirtAdapter(object): + + def __init__(self, mgmt_ip, username, password): + self.mgmt_ip = mgmt_ip + self.username = username + self.password = password + self.parser = etree.XMLParser(remove_blank_text=True) + + def power_off_blades(self, shelf, blade_list): + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + ssh.open() + for blade in blade_list: + LOG.debug('Power off blade %s in shelf %s' % (blade, shelf)) + vm_name = 's%s_b%s' % (shelf, blade) + resp = ssh.execute('virsh destroy %s' % vm_name) + LOG.debug('response: %s' % resp) + ssh.close() + + def power_on_blades(self, shelf, blade_list): + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + ssh.open() + for blade in blade_list: + LOG.debug('Power on blade %s in shelf %s' % (blade, shelf)) + vm_name = 's%s_b%s' % (shelf, blade) + resp = ssh.execute('virsh start %s' % vm_name) + LOG.debug('response: %s' % resp) + ssh.close() + + def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None): + if not boot_dev_list: + boot_dev_list = ['network', 'hd'] + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + ssh.open() + temp_dir= ssh.execute('mktemp -d').strip() + for blade in blade_list: + LOG.debug('Set boot order %s on blade %s in shelf %s' + % (boot_dev_list, blade, shelf)) + vm_name = 's%s_b%s' % (shelf, blade) + resp = ssh.execute('virsh dumpxml %s' % vm_name) + xml_dump = etree.fromstring(resp, self.parser) + os = xml_dump.xpath('/domain/os') + for o in os: + for bootelem in ['boot', 'bootmenu']: + boot = o.xpath(bootelem) + for b in boot: + b.getparent().remove(b) + for dev in boot_dev_list: + b = etree.Element('boot') + b.set('dev', dev) + o.append(b) + bmenu = etree.Element('bootmenu') + 
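# Disable the interactive boot menu so the domain always boots straight
+                    # through the device order configured above.
+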
bmenu.set('enable', 'no') + o.append(bmenu) + tree = etree.ElementTree(xml_dump) + xml_file = temp_dir + '/%s.xml' % vm_name + with open(xml_file, 'w') as f: + tree.write(f, pretty_print=True, xml_declaration=True) + ssh.execute('virsh define %s' % xml_file) + ssh.execute('rm -fr %s' % temp_dir) + ssh.close() + + def get_blades_mac_addresses(self, shelf, blade_list): + LOG.debug('Get the MAC addresses of blades %s in shelf %s' + % (blade_list, shelf)) + macs_per_blade_dict = {} + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + ssh.open() + for blade in blade_list: + vm_name = 's%s_b%s' % (shelf, blade) + mac_list = macs_per_blade_dict[blade] = [] + resp = ssh.execute('virsh dumpxml %s' % vm_name) + xml_dump = etree.fromstring(resp) + interfaces = xml_dump.xpath('/domain/devices/interface') + for interface in interfaces: + macs = interface.xpath('mac') + for mac in macs: + mac_list.append(mac.get('address')) + ssh.close() + return macs_per_blade_dict + + def load_image_file(self, shelf=None, blade=None, vm=None, + image_path=None): + if shelf and blade: + vm_name = 's%s_b%s' % (shelf, blade) + else: + vm_name = vm + + LOG.debug('Load media file %s into %s ' + % (image_path, 'vm %s' % vm if vm else 'blade %s in shelf %s' + % (shelf, blade))) + + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + ssh.open() + temp_dir= ssh.execute('mktemp -d').strip() + resp = ssh.execute('virsh dumpxml %s' % vm_name) + xml_dump = etree.fromstring(resp) + + disks = xml_dump.xpath('/domain/devices/disk') + for disk in disks: + if disk.get('device') == 'cdrom': + disk.set('type', 'file') + sources = disk.xpath('source') + for source in sources: + disk.remove(source) + source = etree.SubElement(disk, 'source') + source.set('file', image_path) + tree = etree.ElementTree(xml_dump) + xml_file = temp_dir + '/%s.xml' % vm_name + with open(xml_file, 'w') as f: + tree.write(f, pretty_print=True, xml_declaration=True) + ssh.execute('virsh define %s' % xml_file) + ssh.execute('rm -fr %s' % temp_dir) + ssh.close() + + def eject_image_file(self, shelf=None, blade=None, vm=None): + if shelf and blade: + vm_name = 's%s_b%s' % (shelf, blade) + else: + vm_name = vm + + LOG.debug('Eject media file from %s ' + % 'vm %s' % vm if vm else 'blade %s in shelf %s' + % (shelf, blade)) + + ssh = SSHClient(self.mgmt_ip, self.username, self.password) + ssh.open() + temp_dir= ssh.execute('mktemp -d').strip() + resp = ssh.execute('virsh dumpxml %s' % vm_name) + xml_dump = etree.fromstring(resp) + + disks = xml_dump.xpath('/domain/devices/disk') + for disk in disks: + if disk.get('device') == 'cdrom': + disk.set('type', 'block') + sources = disk.xpath('source') + for source in sources: + disk.remove(source) + tree = etree.ElementTree(xml_dump) + xml_file = temp_dir + '/%s.xml' % vm_name + with open(xml_file, 'w') as f: + tree.write(f, pretty_print=True, xml_declaration=True) + ssh.execute('virsh define %s' % xml_file) + ssh.execute('rm -fr %s' % temp_dir) + ssh.close() diff --git a/fuel/deploy/cloud_deploy/ssh_client.py b/fuel/deploy/cloud_deploy/ssh_client.py new file mode 100644 index 0000000..b9aad6c --- /dev/null +++ b/fuel/deploy/cloud_deploy/ssh_client.py @@ -0,0 +1,56 @@ +import paramiko +from cloud import common + +TIMEOUT = 600 +LOG = common.LOG + +class SSHClient(object): + + def __init__(self, host, username, password): + self.host = host + self.username = username + self.password = password + self.client = None + + def open(self, timeout=TIMEOUT): + self.client = paramiko.SSHClient() + 
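# Auto-accept unknown host keys: convenient against a freshly installed
+        # Fuel master, but too permissive for production use.
+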
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + self.client.connect(self.host, username=self.username, + password=self.password, timeout=timeout) + + def close(self): + if self.client is not None: + self.client.close() + self.client = None + + def execute(self, command, sudo=False, timeout=TIMEOUT): + if sudo and self.username != 'root': + command = "sudo -S -p '' %s" % command + stdin, stdout, stderr = self.client.exec_command(command, + timeout=timeout) + if sudo: + stdin.write(self.password + '\n') + stdin.flush() + return ''.join(''.join(stderr.readlines()) + + ''.join(stdout.readlines())) + + def run(self, command): + transport = self.client.get_transport() + transport.set_keepalive(1) + chan = transport.open_session() + chan.exec_command(command) + + while not chan.exit_status_ready(): + if chan.recv_ready(): + data = chan.recv(1024) + while data: + print data + data = chan.recv(1024) + + if chan.recv_stderr_ready(): + error_buff = chan.recv_stderr(1024) + while error_buff: + print error_buff + error_buff = chan.recv_stderr(1024) + exit_status = chan.recv_exit_status() + LOG.debug('Exit status %s' % exit_status) \ No newline at end of file diff --git a/fuel/deploy/common.py b/fuel/deploy/common.py deleted file mode 100644 index cd5085c..0000000 --- a/fuel/deploy/common.py +++ /dev/null @@ -1,29 +0,0 @@ -import subprocess -import sys - - -N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5, - 'roles': 6, 'pending_roles': 7, 'online': 8} -E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4, - 'changes': 5, 'pending_release_id': 6} -R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4} -RO = {'name': 0, 'conflicts': 1} - -def exec_cmd(cmd): - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True) - return process.communicate()[0] - -def parse(printout): - parsed_list = [] - lines = printout.splitlines() - for l in lines[2:]: - parsed = [e.strip() for e in l.split('|')] - parsed_list.append(parsed) - return parsed_list - -def err(error_message): - sys.stderr.write(error_message) - sys.exit(1) diff --git a/fuel/deploy/configure_environment.py b/fuel/deploy/configure_environment.py deleted file mode 100644 index 9aca904..0000000 --- a/fuel/deploy/configure_environment.py +++ /dev/null @@ -1,70 +0,0 @@ -import common -import os -import shutil -import yaml - - -from configure_settings import ConfigureSettings -from configure_network import ConfigureNetwork - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -class ConfigureEnvironment(object): - - def __init__(self, dea, yaml_config_dir): - self.env_id = None - self.dea = dea - self.yaml_config_dir = yaml_config_dir - self.env_name = dea.get_environment_name() - - def env_exists(self, env_name): - env_list = parse(exec_cmd('fuel env --list')) - for env in env_list: - if env[E['name']] == env_name and env[E['status']] == 'new': - return True - return False - - def get_env_id(self, env_name): - env_list = parse(exec_cmd('fuel env --list')) - for env in env_list: - if env[E['name']] == env_name: - return env[E['id']] - - def configure_environment(self, dea): - exec_cmd('fuel env -c --name %s --release %s --mode ha --net neutron ' - '--nst vlan' % (self.env_name, - self.supported_release[R['id']])) - - self.env_id = self.get_env_id(self.env_name) - if not self.env_exists(self.env_name): - err("Failed to create environment %s" % self.env_name) - - 
self.config_settings() - self.config_network() - - def config_settings(self): - if os.path.exists(self.yaml_config_dir): - shutil.rmtree(self.yaml_config_dir) - os.makedirs(self.yaml_config_dir) - - settings = ConfigureSettings(self.yaml_config_dir, self.env_id) - settings.config_settings() - - - def config_network(self): - network_yaml=self.yaml_config_dir + '/network_%s.yaml' % self.env_id - os.remove(network_yaml) - - network = ConfigureNetwork(self.yaml_config_dir, network_yaml, - self.env_id, self.dea) - network.config_network() - - - - diff --git a/fuel/deploy/configure_network.py b/fuel/deploy/configure_network.py deleted file mode 100644 index 0b298e5..0000000 --- a/fuel/deploy/configure_network.py +++ /dev/null @@ -1,91 +0,0 @@ -import common -import os -import yaml -import io -import re - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -P1 = re.compile('!\s.*') - -class ConfigureNetwork(object): - - def __init__(self, yaml_config_dir, network_yaml, env_id, dea): - self.yaml_config_dir = yaml_config_dir - self.network_yaml = network_yaml - self.env_id = env_id - self.dea = dea - - def download_settings(self): - exec_cmd('fuel network --env %s --download --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def upload_settings(self): - exec_cmd('fuel network --env %s --upload --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def config_network(self): - - self.download_settings() - - self.apply_network_config() - - self.upload_settings() - - self.verify() - - def apply_network_config(self): - - with io.open(self.network_yaml) as stream: - network_config = yaml.load(stream) - networks = network_config['networks'] - - net = self.dea.get_networks() - net['fuelweb_admin'] = net['management'] - if 'vlan' in net['fuelweb_admin']: - del net['fuelweb_admin']['vlan'] - del net['management'] - net_names = [n for n in net.iterkeys()] - - for i in range(len(networks)): - if networks[i]['name'] == 'management': - networks = networks[:i] + networks[i+1:] - network_config['networks'] = networks - break - - for network in networks: - name = network['name'] - if name in net_names: - if ('vlan' in net[name] and net[name]['vlan'] is not None): - network['vlan_start'] = net[name]['vlan'] - network['cidr'] = net[name]['cidr'] - network['ip_ranges'][0][0] = net[name]['start'] - network['ip_ranges'][0][1] = net[name]['end'] - - with io.open(self.network_yaml, 'w') as stream: - yaml.dump(network_config, stream, default_flow_style=False) - - def verify(self): - ret = exec_cmd('mktemp -d') - temp_dir = ret.splitlines()[0] - - exec_cmd('fuel network --env %s --download --dir %s' - % (self.env_id, temp_dir)) - - ret = exec_cmd('diff -C0 %s %s' - % (self.network_yaml, - temp_dir + '/network_%s.yaml' % self.env_id)) - diff_list = [] - for l in ret.splitlines(): - m = P1.match(l) - if m and '_vip' not in l: - diff_list.append(l) - if diff_list: - err('Uploaded network yaml rejected by Fuel\n') - \ No newline at end of file diff --git a/fuel/deploy/configure_settings.py b/fuel/deploy/configure_settings.py deleted file mode 100644 index cdeea49..0000000 --- a/fuel/deploy/configure_settings.py +++ /dev/null @@ -1,88 +0,0 @@ -import common -import os -import yaml -import io -import re - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -class ConfigureSettings(object): - - def __init__(self, yaml_config_dir, env_id): - self.yaml_config_dir = 
yaml_config_dir - self.env_id = env_id - - def download_settings(self): - exec_cmd('fuel --env %s settings --download' % self.env_id) - - def upload_settings(self): - exec_cmd('fuel --env %s settings --upload' % self.env_id) - - - def config_settings(self): - self.download_settings() - self.modify_settings() - self.upload_settings() - - # Fix console speed - def fix_console_speed(data): - # First remove all console= from the kernel cmdline - cmdline = data["editable"]["kernel_params"]["kernel"]["value"] - pat = re.compile(r"console=[\w,]+\s+") - repl = 1 - while repl != 0: - cmdline, repl = pat.subn("", cmdline) - - # Then add the console info we want - cmdline = re.sub(r"^", "console=tty0 console=ttyS0,115200 ", cmdline) - data["editable"]["kernel_params"]["kernel"]["value"] = cmdline - - # Initialize kernel audit - def initialize_kernel_audit(data): - cmdline = data["editable"]["kernel_params"]["kernel"]["value"] - cmdline = "audit=1 " + cmdline - data["editable"]["kernel_params"]["kernel"]["value"] = cmdline - - # Add crashkernel parameter to boot parameters. W/o this we can't - # make crash dumps after initial deploy. Standard grub setup will add - # crashkernel= options - with bad values but that is another issue - but - # that only enables crash dumps after first reboot - def add_crashkernel_support(data): - cmdline = data["editable"]["kernel_params"]["kernel"]["value"] - cmdline += " crashkernel=256M" - data["editable"]["kernel_params"]["kernel"]["value"] = cmdline - - - def modify_settings(self): - - filename = "%s/settings_%d.yaml" % (self.yaml_config_dir, self.env_id) - if not os.path.isfile(filename): - err("Failed to find %s\n" % filename) - - with io.open(filename) as stream: - data = yaml.load(stream) - - self.fix_console_speed(data) - - self.initialize_kernel_audit(data) - - self.add_crashkernel_support(data) - - # Make sure we have the correct libvirt type - data["editable"]["common"]["libvirt_type"]["value"] = "kvm" - - - # Save the settings into the file from which we loaded them - with io.open(filename, "w") as stream: - yaml.dump(data, stream, default_flow_style=False) - - - - - diff --git a/fuel/deploy/dea.yaml b/fuel/deploy/dea.yaml index 420dae7..b83ddea 100644 --- a/fuel/deploy/dea.yaml +++ b/fuel/deploy/dea.yaml @@ -1,37 +1,947 @@ --- name: ENV-1 -server: - type: hp - mgmt_ip: 10.118.32.197 - username: opnfv - password: E///@work shelf: - id: 1 + type: libvirt + mgmt_ip: 10.20.0.1 + username: user + password: systemabc blade: - id: 1 - role: controller + roles: + - controller - id: 2 + roles: + - controller - id: 3 - role: controller + roles: + - controller - id: 4 - id: 5 - id: 6 -network: - - name: management - cidr: 192.168.0.0/24 - start: 192.168.0.1 - end: 192.168.0.253 - - name: private - vlan: - cidr: 192.168.11.0/24 - start: 192.168.11.1 - end: 192.168.11.253 - - name: storage - vlan: - cidr: 192.168.12.0/24 - start: 192.168.12.1 - end: 192.168.12.253 - - name: public - vlan: +networks: + management_vip: 192.168.0.2 + networking_parameters: + base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 8.8.4.4 + - 8.8.8.8 + floating_ranges: + - - 172.16.0.130 + - 172.16.0.254 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + - 1000 + - 1200 + networks: + - cidr: 172.16.0.0/24 + gateway: 172.16.0.1 + ip_ranges: + - - 172.16.0.2 + - 172.16.0.126 + meta: + assign_vip: true + cidr: 172.16.0.0/24 + configurable: true + floating_range_var: floating_ranges + 
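# The 'meta' blocks appear to be carried over from Fuel's release
+      # defaults; the site-specific values are cidr, ip_ranges, gateway
+      # and vlan_start.
+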
ip_range: + - 172.16.0.2 + - 172.16.0.126 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.2 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 101 + name: management + vlan_start: 101 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.2 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 102 + name: storage + vlan_start: 102 + - cidr: 10.20.0.0/24 + gateway: null + ip_ranges: + - - 10.20.0.3 + - 10.20.0.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: fuelweb_admin + vlan_start: null + public_vip: 172.16.0.2 +controller: +- action: add-br + name: br-eth0 +- action: add-port + bridge: br-eth0 + name: eth0 +- action: add-br + name: br-eth1 +- action: add-port + bridge: br-eth1 + name: eth1 +- action: add-br + name: br-eth2 +- action: add-port + bridge: br-eth2 + name: eth2 +- action: add-br + name: br-eth3 +- action: add-port + bridge: br-eth3 + name: eth3 +- action: add-br + name: br-ex +- action: add-br + name: br-mgmt +- action: add-br + name: br-storage +- action: add-br + name: br-fw-admin +- action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 +- action: add-patch + bridges: + - br-eth3 + - br-ex + trunks: + - 0 +- action: add-br + name: br-prv +- action: add-patch + bridges: + - br-eth2 + - br-prv +compute: +- action: add-br + name: br-eth0 +- action: add-port + bridge: br-eth0 + name: eth0 +- action: add-br + name: br-eth1 +- action: add-port + bridge: br-eth1 + name: eth1 +- action: add-br + name: br-eth2 +- action: add-port + bridge: br-eth2 + name: eth2 +- action: add-br + name: br-eth3 +- action: add-port + bridge: br-eth3 + name: eth3 +- action: add-br + name: br-mgmt +- action: add-br + name: br-storage +- action: add-br + name: br-fw-admin +- action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 +- action: add-br + name: br-prv +- action: add-patch + bridges: + - br-eth2 + - br-prv +interfaces: + eth0: + - fuelweb_admin + - management + eth1: + - storage + eth2: + - private + eth3: + - public +settings: + editable: + access: + email: + description: Email address for Administrator + label: email + type: text + value: admin@localhost 
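+        # 'weight' appears to control only the field ordering in the Fuel UI.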
+ weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be installed + label: Install Ceilometer + type: checkbox + value: true + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + restrictions: + - cluster:net_provider == 'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. 
+ label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. + label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: false + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. If it's disabled, raw + image format will be used to run VMs. OpenStack with raw format currently + does not support snapshotting. + label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. + type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 8.8.8.8, 8.8.4.4 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 0.pool.ntp.org, 1.pool.ntp.org + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. 
+ label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. + label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. 
+ label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: false + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: false + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. + label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' + label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. 
+ label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. + label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. + label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: false + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: '' + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 ... diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py deleted file mode 100644 index 4037c1d..0000000 --- a/fuel/deploy/deploy.py +++ /dev/null @@ -1,212 +0,0 @@ -import time -import os -import sys - -import common -from dha import DeploymentHardwareAdapter -from dea import DeploymentEnvironmentAdapter -from configure_environment import ConfigureEnvironment - - -SUPPORTED_RELEASE = 'Juno on CentOS 6.5' - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -class Deploy(object): - - def __init__(self, yaml_config_dir): - self.supported_release = None - self.yaml_config_dir = yaml_config_dir - - def get_id_list(self, list): - return [l[0] for l in list] - - def cleanup_fuel_environments(self, env_list): - WAIT_LOOP = 10 - SLEEP_TIME = 2 - id_list = self.get_id_list(env_list) - for id in id_list: - exec_cmd('fuel env --env %s --delete' % id) - for i in range(WAIT_LOOP): - if id in self.get_id_list(parse(exec_cmd('fuel env list'))): - time.sleep(SLEEP_TIME) - else: - continue - - def cleanup_fuel_nodes(self, node_list): - for node in node_list: - if node[N['status']] == 'discover': - exec_cmd('fuel node --node-id %s --delete-from-db' - % node[N['id']]) - exec_cmd('dockerctl shell cobbler cobbler system remove ' - '--name node-%s' % node[N['id']]) - - def check_previous_installation(self): - env_list = parse(exec_cmd('fuel env list')) - if env_list: - self.cleanup_fuel_environments(env_list) - node_list = parse(exec_cmd('fuel node list')) - if node_list: - self.cleanup_fuel_nodes(node_list) - - def check_supported_release(self): - release_list= parse(exec_cmd('fuel release -l')) - for release in release_list: - if release[R['name']] == SUPPORTED_RELEASE: - self.supported_release = release - break - if not 
self.supported_release: - err("This Fuel doesn't contain the following " - "release: %s\n" % SUPPORTED_RELEASE) - - def check_role_definitions(self): - role_list= parse(exec_cmd('fuel role --release %s' - % self.supported_release[R['id']])) - roles = [role[RO['name']] for role in role_list] - if 'compute' not in roles: - err("Role compute does not exist in release %" - % self.supported_release[R['name']]) - if 'controller' not in roles: - err("Role controller does not exist in release %" - % self.supported_release[R['name']]) - - def check_prerequisites(self): - self.check_supported_release() - self.check_role_definitions() - self.check_previous_installation() - - def power_off_blades(self, dha, shelf_blades_dict): - for shelf, blade_list in shelf_blades_dict.iteritems(): - dha.power_off_blades(shelf, blade_list) - - def power_on_blades(self, dha, shelf_blades_dict): - for shelf, blade_list in shelf_blades_dict.iteritems(): - dha.power_on_blades(shelf, blade_list) - - def set_boot_order(self, dha, shelf_blades_dict): - for shelf, blade_list in shelf_blades_dict.iteritems(): - dha.set_boot_order_blades(shelf, blade_list) - - def count_discovered_nodes(self, node_list): - discovered_nodes = 0 - for node in node_list: - if node[N['status']] == 'discover': - discovered_nodes += 1 - return discovered_nodes - - def wait_for_discovered_blades(self, no_of_blades): - WAIT_LOOP = 10 - SLEEP_TIME = 2 - all_discovered = False - node_list = parse(exec_cmd('fuel node list')) - for i in range(WAIT_LOOP): - if (self.count_discovered_nodes(node_list) < no_of_blades): - time.sleep(SLEEP_TIME) - node_list = parse(exec_cmd('fuel node list')) - else: - all_discovered = True - break - if not all_discovered: - err("There are %s blades defined, but not all of " - "them have been discovered\n" % no_of_blades) - - def assign_cluster_node_ids(self, dha, dea, controllers, compute_hosts): - node_list= parse(exec_cmd('fuel node list')) - for shelf_id in dea.get_shelf_ids(): - for blade_id in dea.get_blade_ids_per_shelf(shelf_id): - blade_mac_list = dha.get_blade_mac_addresses( - shelf_id, blade_id) - - found = False - for node in node_list: - if (node[N['mac']] in blade_mac_list and - node[N['status']] == 'discover'): - found = True - break - if found: - if dea.is_controller(shelf_id, blade_id): - controllers.append(node[N['id']]) - if dea.is_compute_host(shelf_id, blade_id): - compute_hosts.append(node[N['id']]) - else: - err("Could not find the Node ID for blade " - "with MACs %s or blade is not in " - "discover status\n" % blade_mac_list) - - - def configure_environment(self, dea): - config_env = ConfigureEnvironment(dea, self.yaml_config_dir) - - - - def provision(self): - - - - def fix_power_address(self): - - - - - def deploy(self): - - if id in self.get_id_list(parse(exec_cmd('fuel env list'))): - - self.fix_power_address() - - - - -def main(): - - yaml_path = exec_cmd('pwd').strip() + '/dea.yaml' - yaml_config_dir = '/var/lib/opnfv/pre_deploy' - - deploy = Deploy(yaml_config_dir) - - dea = DeploymentEnvironmentAdapter() - - if not os.path.isfile(yaml_path): - sys.stderr.write("ERROR: File %s not found\n" % yaml_path) - sys.exit(1) - - dea.parse_yaml(yaml_path) - - server_type, mgmt_ip, username, password = dea.get_server_info() - shelf_blades_dict = dea.get_blade_ids_per_shelves() - - dha = DeploymentHardwareAdapter(server_type, mgmt_ip, username, password) - - deploy.check_prerequisites() - - deploy.power_off_blades(dha, shelf_blades_dict) - - deploy.set_boot_order(dha, shelf_blades_dict) - - 
-    deploy.power_on_blades(dha, shelf_blades_dict)
-
-    macs = dha.get_blade_mac_addresses()
-
-    deploy.wait_for_discovered_blades(dea.get_no_of_blades())
-
-
-    controllers = []
-    compute_hosts = []
-    deploy.assign_cluster_node_ids(dha, dea, controllers, compute_hosts)
-
-
-
-    deploy.configure_environment(dea)
-
-    deploy.deploy(dea)
-
-
-
-if __name__ == '__main__':
-    main()
\ No newline at end of file
diff --git a/fuel/prototypes/libvirt/deploy/deploy.sh b/fuel/deploy/deploy.sh
similarity index 91%
rename from fuel/prototypes/libvirt/deploy/deploy.sh
rename to fuel/deploy/deploy.sh
index ba7f7cd..916125e 100755
--- a/fuel/prototypes/libvirt/deploy/deploy.sh
+++ b/fuel/deploy/deploy.sh
@@ -11,10 +11,10 @@
 # Setup locations
 topdir=$(cd `dirname $0`; pwd)
-exampledir=$(cd $topdir/../examples; pwd)
 functions=${topdir}/functions
 tmpdir=$HOME/fueltmp
 deployiso=${tmpdir}/deploy.iso
+cloud_deploy=$(cd ${topdir}/cloud_deploy; pwd)
 
 # Define common functions
 . ${functions}/common.sh
@@ -41,7 +41,7 @@ fi
 
 # Setup tmpdir
 if [ -d $tmpdir ]; then
-  rm -Rf $tmpdir || error_exit "Coul not remove tmpdir $tmpdir"
+  rm -Rf $tmpdir || error_exit "Could not remove tmpdir $tmpdir"
 fi
 mkdir $tmpdir || error_exit "Could not create tmpdir $tmpdir"
 
@@ -54,16 +54,16 @@ fi
 
 # If no DEA specified, use the example one
 if [ $# -eq 1 ]; then
-  deafile=${exampledir}/libvirt_dea.yaml
+  deafile=${topdir}/dea.yaml
 else
   deafile=$(cd `dirname $2`; echo `pwd`/`basename $2`)
 fi
+cp ${deafile} ${cloud_deploy}/
 
 if [ ! -f $deafile ]; then
   error_exit "Could not find DEA file $deafile"
 fi
-
 # Enable safety catch
 echo "Enabling auto-kill if deployment exceeds $MAXDEPLOYTIME"
 (sleep $MAXDEPLOYTIME; echo "Auto-kill of deploy after a timeout of $MAXDEPLOYTIME"; kill $$) &
@@ -73,11 +73,12 @@ killpid=$!
 trap exit_handler exit
 
 # Stop all VMs
-for node in controller1 controller2 controller3 compute4 compute5 fuel-master
+for node in `ls libvirt/vms`
 do
   virsh destroy $node >/dev/null 2>&1
 done
 
+
 # Install the Fuel master
 # (Convert to functions at later stage)
 echo "Patching iso file"
@@ -85,7 +86,8 @@ ${functions}/patch-iso.sh $isofile $deployiso $tmpdir || error_exit "Failed to p
 
 # Swap isofiles from now on
 isofile=$deployiso
 . ${functions}/install_iso.sh
-. ${functions}/deploy_env.sh
+
+python ${cloud_deploy}/cloud_deploy.py
 
 echo "Waiting for five minutes for deploy to stabilize"
 sleep 5m
diff --git a/fuel/deploy/deploy_fuel.sh b/fuel/deploy/deploy_fuel.sh
deleted file mode 100755
index 8cb72b7..0000000
--- a/fuel/deploy/deploy_fuel.sh
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/bash
-# Deploy in deployFuel has the "configure host-network,
-# install fuel, configure vm and start it" meaning
-set -o xtrace
-set -o errexit
-set -o nounset
-set -o pipefail
-
-if [ $# -ne 2 ]; then
-    echo "Usage: $0 <iso_file> <interface>"
-    exit 1
-fi
-
-readonly iso_file=$1
-readonly interface=$2
-readonly vm_name="fuel_opnfv"
-readonly ssh_fuel_vm="sshpass -p r00tme
-    ssh -o UserKnownHostsFile=/dev/null
-    -o StrictHostKeyChecking=no
-    -q
-    root@192.168.0.11"
-readonly RUN_INSTALL="${RUN_INSTALL:-false}"
-readonly DEV="${DEV:-false}"
-
-# poll is not a real timeout; commands can take some undefined time to execute.
-# It is a count of how many times to try while sleeping shortly
-# in between checks
-readonly poll_virtinstall=1800
-readonly poll_fuel_startup=1200
-readonly poll_deployment=2150
-readonly fuel_logfile="/var/log/puppet/bootstrap_admin_node.log"
-
-cat >$interface.xml <<EOF
-<network>
-  <name>$interface</name>
-
-
-</network>
-EOF
-
-cleanup_previous_run() {
-    echo "Cleaning up previous run"
-    set +eu
-    virsh net-destroy $interface > /dev/null 2>&1
-    virsh net-undefine $interface > /dev/null 2>&1
-    virsh destroy $vm_name > /dev/null 2>&1
-    virsh undefine $vm_name > /dev/null 2>&1
-    set -eu
-}
-
-create_disk_and_install() {
-    rm -rf $vm_name.qcow2
-    qemu-img create -f qcow2 -o preallocation=metadata $vm_name.qcow2 60G
-    virt-install --connect=qemu:///system \
-        --name=$vm_name \
-        --network=network:$interface \
-        --ram 2048 --vcpus=4,cores=2 --check-cpu --hvm \
-        --disk path=$vm_name.qcow2,format=qcow2,device=disk,bus=virtio \
-        --noautoconsole --vnc \
-        --cdrom $iso_file
-}
-
-wait_for_virtinstall() {
-    # Workaround for virt-install --wait which restarts the vm
-    # too fast to attach the disk
-    echo "Waiting for virt-install to finish..."
-    set +eu
-    stopped=false
-    for i in $(seq 0 $poll_virtinstall); do
-        virsh_out=`virsh list | grep "$vm_name"`
-        if [ -z "$virsh_out" ]; then
-            stopped=true
-            break
-        fi
-        sleep 2
-    done
-    set -eu
-}
-
-wait_for_fuel_startup() {
-    echo "Wait for fuel to start up..."
-    for i in $(seq 0 $poll_fuel_startup); do
-        sleep 2 && echo -n "$i "
-        $ssh_fuel_vm grep complete $fuel_logfile &&
-            echo "Fuel bootstrap is done, deployment should have started now" &&
-            return 0
-    done
-    return 1
-}
-
-
-cleanup_previous_run
-virsh net-define $interface.xml
-virsh net-start $interface
-create_disk_and_install
-wait_for_virtinstall
-
-echo "Starting $vm_name after installation in 6s..." && sleep 6s
-set +eu
-
-virsh start $vm_name
-if ! wait_for_fuel_startup; then
-    echo "Fuel failed to start up"
-    exit 1
-fi
diff --git a/fuel/prototypes/libvirt/deploy/functions/common.sh b/fuel/deploy/functions/common.sh
similarity index 100%
rename from fuel/prototypes/libvirt/deploy/functions/common.sh
rename to fuel/deploy/functions/common.sh
diff --git a/fuel/prototypes/libvirt/deploy/functions/install_iso.sh b/fuel/deploy/functions/install_iso.sh
similarity index 100%
rename from fuel/prototypes/libvirt/deploy/functions/install_iso.sh
rename to fuel/deploy/functions/install_iso.sh
diff --git a/fuel/prototypes/libvirt/deploy/functions/isolinux.cfg.patch b/fuel/deploy/functions/isolinux.cfg.patch
similarity index 100%
rename from fuel/prototypes/libvirt/deploy/functions/isolinux.cfg.patch
rename to fuel/deploy/functions/isolinux.cfg.patch
diff --git a/fuel/prototypes/libvirt/deploy/functions/ks.cfg.patch b/fuel/deploy/functions/ks.cfg.patch
similarity index 100%
rename from fuel/prototypes/libvirt/deploy/functions/ks.cfg.patch
rename to fuel/deploy/functions/ks.cfg.patch
diff --git a/fuel/prototypes/libvirt/deploy/functions/patch-iso.sh b/fuel/deploy/functions/patch-iso.sh
similarity index 100%
rename from fuel/prototypes/libvirt/deploy/functions/patch-iso.sh
rename to fuel/deploy/functions/patch-iso.sh
diff --git a/fuel/deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/hardware_adapters/hp/hp_adapter.py
deleted file mode 100644
index 7ce0dc9..0000000
--- a/fuel/deploy/hardware_adapters/hp/hp_adapter.py
+++ /dev/null
@@ -1,411 +0,0 @@
-import re
-import time
-from netaddr import EUI, mac_unix
-import logging
-
-from run_oa_command import RunOACommand
-
-
-LOG = logging.getLogger(__name__)
-out_hdlr = logging.FileHandler(__file__.split('.')[0] + '.log', mode='w')
-out_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
-LOG.addHandler(out_hdlr)
-LOG.setLevel(logging.DEBUG)
-
-class HpAdapter(object):
-
-    # Exception thrown at any kind of failure to get the requested
-    # information.
-    class NoInfoFoundError(Exception):
-        pass
-
-    # Totally failed to connect, so a re-try with other HW should
-    # be done. This exception should never escape this class.
-    class InternalConnectError(Exception):
-        pass
-
-    # Format MAC so leading zeroes are displayed
-    class mac_dhcp(mac_unix):
-        word_fmt = "%.2x"
-
-    def __init__(self, mgmt_ip, username, password):
-        self.mgmt_ip = mgmt_ip
-        self.username = username
-        self.password = password
-        self.oa_error_message = ''
-
-    def get_blade_mac_addresses(self, shelf, blade):
-
-        LOG.debug("Entering: get_mac_addr_hp(%d,%d)" % (shelf, blade))
-        self.oa_error_message = ''
-        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
-        LOG.debug("Connect to active OA for shelf %d" % shelf)
-        try:
-            res = oa.connect_to_active()
-        except:
-            raise self.InternalConnectError(oa.error_message)
-        if res is None:
-            raise self.InternalConnectError(oa.error_message)
-        if not oa.connected():
-            raise self.NoInfoFoundError(oa.error_message)
-
-        cmd = ("show server info " + str(blade))
-
-        LOG.debug("Send command to OA: %s" % cmd)
-        try:
-            serverinfo = oa.send_command(cmd)
-        except:
-            raise self.NoInfoFoundError(oa.error_message)
-        finally:
-            oa.close()
-
-        (left, right) = self.find_mac(serverinfo, shelf, blade)
-
-        left = EUI(left, dialect=self.mac_dhcp)
-        right = EUI(right, dialect=self.mac_dhcp)
-        return [str(left), str(right)]
-
-    def get_blade_hardware_info(self, shelf, blade=None):
-
-        if blade:
-            LOG.debug("Entering: get_hp_info(%d,%d)" % (shelf, blade))
-        else:
-            LOG.debug("Entering: get_hp_info(%d)" % shelf)
-
-        self.oa_error_message = ''
-        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
-        LOG.debug("Connect to active OA for shelf %d" % shelf)
-
-        try:
-            res = oa.connect_to_active()
-        except:
-            self.oa_error_message = oa.error_message
-            return None
-        if res is None:
-            self.oa_error_message = oa.error_message
-            return None
-        if not oa.connected():
-            self.oa_error_message = oa.error_message
-            return None
-
-        # If no blade is specified we're done; we know this is an HP at this point
-        if not blade:
-            oa.close()
-            return "HP"
-
-        check = "show server info %d" % blade
-        LOG.debug("Send command to OA: %s" % check)
-        output = oa.send_command("%s" % check)
-        oa.close()
-
-        match = r"Product Name:\s+(.+)\Z"
-        if re.search(match, str(output[:])) is None:
-            self.oa_error_message = ("Blade %d in shelf %d does not exist\n"
-                                     % (blade, shelf))
-            return None
-
-        for line in output:
-            seobj = re.search(match, line)
-            if seobj:
-                return "HP %s" % seobj.group(1)
-        return False
-
-    def power_off_blades(self, shelf, blade_list):
-        return self.set_state(shelf, 'locked', blade_list=blade_list)
-
-    def power_on_blades(self, shelf, blade_list):
-        return self.set_state(shelf, 'unlocked', blade_list=blade_list)
-
-    def power_off_blade(self, shelf, blade):
-        return self.set_state(shelf, 'locked', one_blade=blade)
-
-    def power_on_blade(self, shelf, blade):
-        return self.set_state(shelf, 'unlocked', one_blade=blade)
-
-    def set_boot_order_blade(self, shelf, blade):
-        return self.set_boot_order(shelf, one_blade=blade)
-
-    def set_boot_order_blades(self, shelf, blade_list):
-        return self.set_boot_order(shelf, blade_list=blade_list)
-
-
-
-    # Search HP's OA server info for MAC for left and right control
-    def find_mac(self, serverinfo, shelf, blade):
-        left = False
-        right = False
-        for line in serverinfo:
-            if ("No Server Blade Installed" in line or
-                    "Invalid Arguments" in line):
-                raise self.NoInfoFoundError("Blade %d in shelf %d "
-                                            "does not exist." % (blade, shelf))
-            seobj = re.search(r"LOM1:1-a\s+([0-9A-F:]+)", line, re.I)
-            if seobj:
-                left = seobj.group(1)
-            else:
-                seobj = re.search(r"LOM1:2-a\s+([0-9A-F:]+)", line, re.I)
-                if seobj:
-                    right = seobj.group(1)
-            if left and right:
-                return left, right
-        raise self.NoInfoFoundError("Could not find MAC for blade %d "
-                                    "in shelf %d." % (blade, shelf))
-
-    # Do power on or off on all configured blades in shelf.
-    # Return None to indicate that no connection to OA succeeded,
-    # Return False to indicate some connection to OA succeeded,
-    # or config error
-    # Return True to indicate that the power state was successfully updated
-    # state: locked, unlocked
-    def set_state(self, shelf, state, one_blade=None, blade_list=None):
-
-        if state not in ['locked', 'unlocked']:
-            return None
-
-        if one_blade:
-            LOG.debug("Entering: set_state_hp(%d,%s,%d)" %
-                      (shelf, state, one_blade))
-        else:
-            LOG.debug("Entering: set_state_hp(%d,%s)" % (shelf, state))
-
-        self.oa_error_message = ''
-
-        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
-        LOG.debug("Connect to active OA for shelf %d" % shelf)
-
-        try:
-            res = oa.connect_to_active()
-        except:
-            self.oa_error_message = oa.error_message
-            return None
-        if res is None:
-            self.oa_error_message = oa.error_message
-            return None
-        if not oa.connected():
-            self.oa_error_message = oa.error_message
-            return False
-
-        if one_blade:
-            blades = [one_blade]
-        else:
-            blades = sorted(blade_list)
-
-        LOG.debug("Check if blades are present")
-
-        check = "show server list"
-
-        LOG.debug("Send command to OA: %s" % check)
-        output = oa.send_command(check)
-        first = True
-        bladelist = ''
-        for blade in blades:
-            prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
-                              re.MULTILINE)
-            if prog.search(str(output[:])) is not None:
-                oa.close()
-                self.oa_error_message = ("Blade %d in shelf %d "
-                                         % (blade, shelf))
-                if one_blade:
-                    self.oa_error_message += ("does not exist.\n"
-                                              "Set state %s not performed.\n"
-                                              % state)
-                else:
-                    self.oa_error_message += (
-                        "specified but does not exist.\nSet "
-                        "state %s not performed on shelf %d\n"
-                        % (state, shelf))
-                return False
-            if not first:
-                bladelist += ","
-            else:
-                first = False
-            bladelist += str(blade)
-
-        if blade_list:
-            LOG.debug("All blades present")
-
-        # Use leading upper case on On/Off so it can be reused in match
-        extra = ""
-        if state == "locked":
-            powerstate = "Off"
-            extra = "force"
-        else:
-            powerstate = "On"
-
-        cmd = "power%s server %s" % (powerstate, bladelist)
-
-        if extra != "":
-            cmd += " %s" % extra
-
-        LOG.debug("Send command to OA: %s" % cmd)
-
-        try:
-            oa.send_command(cmd)
-        except:
-            self.oa_error_message = oa.error_message
-            oa.close()
-            return False
-
-        # Check that all blades reach the state, which can take some time,
-        # so re-try a couple of times
-        LOG.debug("Check if state %s successfully set" % state)
-        recheck = 2
-        while True:
-            LOG.debug("Send command to OA: %s" % check)
-            try:
-                output = oa.send_command(check)
-            except:
-                self.oa_error_message = oa.error_message
-                oa.close()
-                return False
-            for blade in blades:
-                match = (r"\s+" + str(blade) +
-                         r"\s+\w+\s+\w+.\w+.\w+.\w+\s+\w+\s+%s" %
-                         powerstate)
-                prog = re.compile(match, re.MULTILINE)
-                if prog.search(str(output[:])) is None:
-                    recheck -= 1
-                    if recheck >= 0:
-                        # Re-try
-                        time.sleep(3)
-                        break
-                    oa.close()
-                    self.oa_error_message = (
-                        "Could not set state %s on blade %d in shelf %d\n"
-                        % (state, one_blade, shelf))
-                    for line in output:
-                        self.oa_error_message += line
-                    return False
-            else:
-                # state reached for all blades, exit the infinite loop
-                break
-
-        if one_blade:
-            LOG.debug("State %s successfully set on blade %d in shelf %d"
-                      % (state, one_blade, shelf))
-        else:
-            LOG.debug("State %s successfully set on blades %s in shelf %d"
-                      % (state, blade_list, shelf))
-        oa.close()
-        return True
-
-    # Change boot order on all blades in shelf.
-    # Return None to indicate that no connection to OA succeeded,
-    # Return False to indicate some connection to OA succeeded,
-    # or config error,
-    # Return True to indicate that the boot order was successfully changed
-    def set_boot_order(self, shelf, one_blade=None, blade_list=None):
-
-        if one_blade:
-            LOG.debug("Entering: set_bootorder_hp(%d,%d)" % (shelf, one_blade))
-        else:
-            LOG.debug("Entering: set_bootorder_hp(%d)" % shelf)
-
-        self.oa_error_message = ''
-
-        oa = RunOACommand(self.mgmt_ip, self.username, self.password)
-
-        LOG.debug("Connect to active OA for shelf %d" % shelf)
-
-        try:
-            res = oa.connect_to_active()
-        except:
-            self.oa_error_message = oa.error_message
-            return None
-        if res is None:
-            self.oa_error_message = oa.error_message
-            return None
-        if not oa.connected():
-            self.oa_error_message = oa.error_message
-            return False
-
-        if one_blade:
-            blades = [one_blade]
-        else:
-            blades = sorted(blade_list)
-
-        LOG.debug("Check if blades are present")
-
-        check = "show server list"
-
-        LOG.debug("Send command to OA: %s" % check)
-
-        output = oa.send_command(check)
-        first = True
-        bladelist = ''
-        for blade in blades:
-            prog = re.compile(r"\s+" + str(blade) + r"\s+\[Absent\]",
-                              re.MULTILINE)
-            if prog.search(str(output[:])) is not None:
-                oa.close()
-                self.oa_error_message = ("Blade %d in shelf %d "
-                                         % (blade, shelf))
-                if one_blade:
-                    self.oa_error_message += (
-                        "does not exist.\nChange boot order not performed.\n")
-                else:
-                    self.oa_error_message += (
-                        "specified but does not exist.\n"
-                        "Change boot order not performed on shelf %d\n"
-                        % shelf)
-                return False
-            if not first:
-                bladelist += ','
-            else:
-                first = False
-            bladelist += str(blade)
-
-        if blade_list:
-            LOG.debug("All blades present")
-
-        # Boot origins are pushed so first set boot from hard disk, then PXE
-        # NB! If we want to support boot from SD we must add USB to the "stack"
-        cmd1 = "set server boot first hdd %s" % bladelist
-        cmd2 = "set server boot first pxe %s" % bladelist
-        for cmd in [cmd1, cmd2]:
-
-            LOG.debug("Send command to OA: %s" % cmd)
-            try:
-                output = oa.send_command(cmd)
-            except:
-                self.oa_error_message = oa.error_message
-                for line in output:
-                    self.oa_error_message += line
-                oa.close()
-                return False
-
-        # Check that all blades got the correct boot order
-        # Needs updating if USB is added
-        LOG.debug("Check if boot order successfully set")
-        match = (r"^.*Boot Order\):\',\s*\'(\\t)+PXE NIC 1\',\s*\'(\\t)"
-                 r"+Hard Drive")
-        prog = re.compile(match)
-        for blade in blades:
-
-            check = "show server boot %d" % blade
-
-            LOG.debug("Send command to OA: %s" % check)
-            try:
-                output = oa.send_command(check)
-            except:
-                self.oa_error_message = oa.error_message
-                oa.close()
-                return False
-            if prog.search(str(output[:])) is None:
-                oa.close()
-                self.oa_error_message = ("Failed to set boot order on blade "
-                                         "%d in shelf %d\n" % (blade, shelf))
-                for line in output:
-                    self.oa_error_message += line
-                return False
-            LOG.debug("Boot order successfully set on blade %d in shelf %d"
-                      % (blade, shelf))
-
-        if blade_list:
-            LOG.debug("Boot order successfully set on all configured blades "
-                      "in shelf %d" % (shelf))
-        oa.close()
-        return True
diff --git a/fuel/deploy/hardware_adapters/hp/run_oa_command.py b/fuel/deploy/hardware_adapters/hp/run_oa_command.py
deleted file mode 100644
index 32135c3..0000000
--- a/fuel/deploy/hardware_adapters/hp/run_oa_command.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import socket
-import paramiko
-import logging
-
-LOG = logging.getLogger(__name__)
-out_hdlr = logging.FileHandler(__file__.split('.')[0] + '.log', mode='w')
-out_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
-LOG.addHandler(out_hdlr)
-LOG.setLevel(logging.DEBUG)
-
-class RunOACommand:
-
-    def __init__(self, mgmt_ip, username, password):
-        self.ssh = None
-        self.mgmt_ip = mgmt_ip
-        self.username = username
-        self.password = password
-        self.error_message = ""
-
-    def connected(self):
-        return self.ssh is not None
-
-    def close(self):
-        if self.connected():
-            self.ssh.close()
-            self.ssh = None
-        self.error_message = ""
-
-    def connect(self):
-        LOG.info("Trying to connect to OA at %s" % self.mgmt_ip)
-        try:
-            self.ssh.connect(self.mgmt_ip,
-                             username=self.username,
-                             password=self.password,
-                             look_for_keys=False,
-                             allow_agent=False)
-            return True
-        except socket.error, (err, message):
-            self.error_message += ("Can not talk to OA %s: %s\n" %
-                                   (self.mgmt_ip, message))
-        except Exception as e:
-            self.error_message += ("Can not talk to OA %s: %s\n" %
-                                   (self.mgmt_ip, e.args))
-        LOG.error("Failed to connect to OA at %s" % self.mgmt_ip)
-        return False
-
-    # Return None if this most likely is not an OA,
-    # False if we failed to connect to an active OA,
-    # True if connected
-    def connect_to_active(self):
-        self.error_message = "OA connect failed with these errors:\n"
-
-        self.ssh = paramiko.SSHClient()
-        self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
-
-        initial_mgmt_ip = self.mgmt_ip
-        if not self.connect():
-            octets = self.mgmt_ip.split(".")
-            self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
-                                            octets[1],
-                                            octets[2],
-                                            str(int(octets[3]) + 1))
-            if not self.connect():
-                self.ssh = None
-                LOG.error("Failed to connect to OA at %s (and %s)" %
-                          (initial_mgmt_ip, self.mgmt_ip))
-                return None
-
-        output = self.send_command("show oa status")
-        for line in output:
-            if "Standby" in line:
-                self.ssh.close()
-                self.error_message += (
-                    "%s is the standby OA, trying next OA\n" % self.mgmt_ip)
-                LOG.info("%s is the standby OA" % self.mgmt_ip)
-                if self.mgmt_ip != initial_mgmt_ip:
-                    self.error_message += (
-                        "Can only talk to OA %s which is the standby OA\n" %
-                        self.mgmt_ip)
-                    self.ssh = None
-                    return False
-                else:
-                    octets = self.mgmt_ip.split(".")
-                    self.mgmt_ip = "%s.%s.%s.%s" % (octets[0],
-                                                    octets[1],
-                                                    octets[2],
-                                                    str(int(octets[3]) + 1))
-                    if not self.connect():
-                        self.ssh = None
-                        return False
-        LOG.info("Connected to active OA at %s" % self.mgmt_ip)
-        self.error_message = ""
-        return True
-
-    def send_command(self, cmd):
-        if not self.connected():
-            self.error_message = (
-                "Not connected, cannot send command %s\n" % (cmd))
-            raise
-
-        LOG.info('Sending "%s" to %s' % (cmd, self.mgmt_ip))
-        stdin, stdout, stderr = self.ssh.exec_command(cmd)
-        output = []
-        for line in stdout.read().splitlines():
-            if line != '':
-                output.append(line)
-        return output
-
-    def __exit__(self, type, value, traceback):
-        if self.connected():
-            self.close()
-            self.ssh = None
\ No newline at end of file
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel1 b/fuel/deploy/libvirt/networks/fuel1
similarity index 100%
rename from fuel/prototypes/libvirt/examples/networks/fuel1
rename to fuel/deploy/libvirt/networks/fuel1
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel2 b/fuel/deploy/libvirt/networks/fuel2
similarity index 100%
rename from fuel/prototypes/libvirt/examples/networks/fuel2
rename to fuel/deploy/libvirt/networks/fuel2
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel3 b/fuel/deploy/libvirt/networks/fuel3
similarity index 100%
rename from fuel/prototypes/libvirt/examples/networks/fuel3
rename to fuel/deploy/libvirt/networks/fuel3
diff --git a/fuel/prototypes/libvirt/examples/networks/fuel4 b/fuel/deploy/libvirt/networks/fuel4
similarity index 100%
rename from fuel/prototypes/libvirt/examples/networks/fuel4
rename to fuel/deploy/libvirt/networks/fuel4
diff --git a/fuel/prototypes/libvirt/examples/vms/fuel-master b/fuel/deploy/libvirt/vms/fuel-master
similarity index 100%
rename from fuel/prototypes/libvirt/examples/vms/fuel-master
rename to fuel/deploy/libvirt/vms/fuel-master
diff --git a/fuel/prototypes/libvirt/examples/vms/controller1 b/fuel/deploy/libvirt/vms/s1_b1
similarity index 99%
rename from fuel/prototypes/libvirt/examples/vms/controller1
rename to fuel/deploy/libvirt/vms/s1_b1
index f82ad28..a879163 100644
--- a/fuel/prototypes/libvirt/examples/vms/controller1
+++ b/fuel/deploy/libvirt/vms/s1_b1
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>controller1</name>
+  <name>s1_b1</name>
   <memory>2097152</memory>
   <currentMemory>2097152</currentMemory>
   <vcpu>2</vcpu>
diff --git a/fuel/prototypes/libvirt/examples/vms/controller2 b/fuel/deploy/libvirt/vms/s1_b2
similarity index 99%
rename from fuel/prototypes/libvirt/examples/vms/controller2
rename to fuel/deploy/libvirt/vms/s1_b2
index 63ad86a..27eebcf 100644
--- a/fuel/prototypes/libvirt/examples/vms/controller2
+++ b/fuel/deploy/libvirt/vms/s1_b2
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>controller2</name>
+  <name>s1_b2</name>
   <memory>2097152</memory>
   <currentMemory>2097152</currentMemory>
   <vcpu>2</vcpu>
diff --git a/fuel/prototypes/libvirt/examples/vms/controller3 b/fuel/deploy/libvirt/vms/s1_b3
similarity index 99%
rename from fuel/prototypes/libvirt/examples/vms/controller3
rename to fuel/deploy/libvirt/vms/s1_b3
index 7c64a9d..37a4d2f 100644
--- a/fuel/prototypes/libvirt/examples/vms/controller3
+++ b/fuel/deploy/libvirt/vms/s1_b3
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>controller3</name>
+  <name>s1_b3</name>
   <memory>2097152</memory>
   <currentMemory>2097152</currentMemory>
   <vcpu>2</vcpu>
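Note on the deleted run_oa_command.py above: its active/standby handling reduces to one probe loop -- try the configured Onboard Administrator address, and if it is unreachable or answers as "Standby", retry the neighbouring address (last octet + 1). A minimal sketch of that pattern, with hypothetical connect and is_standby callables standing in for the paramiko SSH logic (not code from the repo):

    def next_oa_ip(ip):
        # The OA pair sits on adjacent addresses, e.g. 10.0.0.8 -> 10.0.0.9.
        octets = ip.split('.')
        octets[3] = str(int(octets[3]) + 1)
        return '.'.join(octets)

    def find_active_oa(ip, connect, is_standby):
        # Probe the configured address first, then its neighbour, and return
        # the first address that connects and is not the standby OA.
        for candidate in (ip, next_oa_ip(ip)):
            if connect(candidate) and not is_standby(candidate):
                return candidate
        return None  # neither address answered as the active OA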
diff --git a/fuel/prototypes/libvirt/examples/vms/compute4 b/fuel/deploy/libvirt/vms/s1_b4
similarity index 99%
rename from fuel/prototypes/libvirt/examples/vms/compute4
rename to fuel/deploy/libvirt/vms/s1_b4
index ec98eab..97384ba 100644
--- a/fuel/prototypes/libvirt/examples/vms/compute4
+++ b/fuel/deploy/libvirt/vms/s1_b4
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>compute4</name>
+  <name>s1_b4</name>
   <memory>8388608</memory>
   <currentMemory>8388608</currentMemory>
   <vcpu>2</vcpu>
diff --git a/fuel/prototypes/libvirt/examples/vms/compute5 b/fuel/deploy/libvirt/vms/s1_b5
similarity index 99%
rename from fuel/prototypes/libvirt/examples/vms/compute5
rename to fuel/deploy/libvirt/vms/s1_b5
index 411be64..97218c3 100644
--- a/fuel/prototypes/libvirt/examples/vms/compute5
+++ b/fuel/deploy/libvirt/vms/s1_b5
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>compute5</name>
+  <name>s1_b5</name>
   <memory>8388608</memory>
   <currentMemory>8388608</currentMemory>
   <vcpu>2</vcpu>
diff --git a/fuel/deploy/libvirt/vms/s1_b6 b/fuel/deploy/libvirt/vms/s1_b6
new file mode 100644
index 0000000..0cd3028
--- /dev/null
+++ b/fuel/deploy/libvirt/vms/s1_b6
@@ -0,0 +1,100 @@
[100-line libvirt domain definition for s1_b6; the XML element markup was lost
in rendering. Recoverable values: name s1_b6, memory/currentMemory 8388608,
2 vcpus, hvm OS type, SandyBridge/Intel CPU model, on_poweroff destroy,
on_reboot/on_crash restart, emulator /usr/bin/kvm.]
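With the domain definitions collected under fuel/deploy/libvirt/vms, deploy.sh no longer hard-codes node names: its `for node in \`ls libvirt/vms\`` loop treats every file in that directory as one VM, so adding a blade such as s1_b6 is just adding a file. A stand-alone sketch of the same idea (directory path assumed, not code from the repo):

    import os
    import subprocess

    VMS_DIR = 'libvirt/vms'  # relative to fuel/deploy, as in deploy.sh

    # Every file in the directory is one libvirt domain definition
    # (fuel-master, s1_b1 ... s1_b6).
    for node in sorted(os.listdir(VMS_DIR)):
        # Tear down any running instance, ignoring errors just as
        # deploy.sh does with its >/dev/null 2>&1 redirect.
        with open(os.devnull, 'w') as devnull:
            subprocess.call(['virsh', 'destroy', node],
                            stdout=devnull, stderr=devnull)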
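One last pattern worth noting, since cloud_deploy.py now runs under it: the "safety catch" kept in deploy.sh, `(sleep $MAXDEPLOYTIME; ...; kill $$) &` plus an exit trap. The same watchdog expressed in Python, as a hedged sketch only (MAXDEPLOYTIME assumed to be seconds; arm_safety_catch is not a function from this patch):

    import os
    import signal
    import threading

    def arm_safety_catch(max_deploy_time):
        # Equivalent of deploy.sh's backgrounded subshell: kill the deploy
        # if it overruns the allowed time.
        def auto_kill():
            print('Auto-kill of deploy after a timeout of %s' % max_deploy_time)
            os.kill(os.getpid(), signal.SIGTERM)
        timer = threading.Timer(max_deploy_time, auto_kill)
        timer.daemon = True  # dies with the process, like the trap's kill of $killpid
        timer.start()
        return timer  # call cancel() on success, as the exit trap does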