Autodeploy inspired on Prototype #2 89/489/10
author: Szilard Cserey <szilard.cserey@ericsson.com>
Mon, 4 May 2015 10:11:29 +0000 (12:11 +0200)
committer: Szilard Cserey <szilard.cserey@ericsson.com>
Tue, 19 May 2015 18:15:25 +0000 (20:15 +0200)
- setup libvirt environment
- setup vfuel environment
- patch iso
- install Fuel Master
- deploy cloud
- hardware adapter for libvirt
- hardware adapter for ipmi
- hardware adapter for hp
- README: AutoDeploy instructions

JIRA: [BGS-2] Create Fuel deployment script

Change-Id: I862d824829baaae7d21115776d13355d575a47c8
Signed-off-by: Szilard Cserey <szilard.cserey@ericsson.com>
57 files changed:
fuel/deploy/README.rst [deleted file]
fuel/deploy/README.txt [new file with mode: 0644]
fuel/deploy/__init__.py [new file with mode: 0644]
fuel/deploy/baremetal/dea.yaml [new file with mode: 0644]
fuel/deploy/baremetal/dha.yaml [new file with mode: 0644]
fuel/deploy/baremetal/vm/vFuel [new file with mode: 0644]
fuel/deploy/cloud/configure_environment.py [moved from fuel/deploy/cloud_deploy/cloud/configure_environment.py with 75% similarity]
fuel/deploy/cloud/configure_network.py [moved from fuel/deploy/cloud_deploy/cloud/configure_network.py with 77% similarity]
fuel/deploy/cloud/configure_nodes.py [new file with mode: 0644]
fuel/deploy/cloud/configure_settings.py [moved from fuel/deploy/cloud_deploy/cloud/configure_settings.py with 52% similarity]
fuel/deploy/cloud/deploy.py [new file with mode: 0644]
fuel/deploy/cloud/deployment.py [new file with mode: 0644]
fuel/deploy/cloud_deploy/__init__.py [deleted file]
fuel/deploy/cloud_deploy/cloud/__init__.py [deleted file]
fuel/deploy/cloud_deploy/cloud/configure_nodes.py [deleted file]
fuel/deploy/cloud_deploy/cloud/dea.py [deleted file]
fuel/deploy/cloud_deploy/cloud/deploy.py [deleted file]
fuel/deploy/cloud_deploy/cloud/deployment.py [deleted file]
fuel/deploy/cloud_deploy/cloud_deploy.py [deleted file]
fuel/deploy/cloud_deploy/hardware_adapters/__init__.py [deleted file]
fuel/deploy/cloud_deploy/hardware_adapters/dha.py [deleted file]
fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py [deleted file]
fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py [deleted file]
fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py [deleted file]
fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py [deleted file]
fuel/deploy/common.py [moved from fuel/deploy/cloud_deploy/cloud/common.py with 59% similarity]
fuel/deploy/dea.py [new file with mode: 0644]
fuel/deploy/deploy.py [new file with mode: 0644]
fuel/deploy/deploy.sh [deleted file]
fuel/deploy/deploy_env.py [new file with mode: 0644]
fuel/deploy/dha.py [new file with mode: 0644]
fuel/deploy/dha_adapters/__init__.py [new file with mode: 0644]
fuel/deploy/dha_adapters/hardware_adapter.py [new file with mode: 0644]
fuel/deploy/dha_adapters/hp_adapter.py [new file with mode: 0644]
fuel/deploy/dha_adapters/ipmi_adapter.py [new file with mode: 0644]
fuel/deploy/dha_adapters/libvirt_adapter.py [new file with mode: 0644]
fuel/deploy/functions/common.sh [deleted file]
fuel/deploy/functions/install_iso.sh [deleted file]
fuel/deploy/functions/isolinux.cfg.patch [deleted file]
fuel/deploy/functions/ks.cfg.patch [deleted file]
fuel/deploy/functions/patch-iso.sh [deleted file]
fuel/deploy/install-ubuntu-packages.sh [moved from fuel/deploy/setup_vms/setup-vm-host.sh with 90% similarity]
fuel/deploy/install_fuel_master.py [new file with mode: 0644]
fuel/deploy/libvirt/dea.yaml [moved from fuel/deploy/dea.yaml with 87% similarity]
fuel/deploy/libvirt/dha.yaml [new file with mode: 0644]
fuel/deploy/libvirt/vms/compute [moved from fuel/deploy/libvirt/vms/s1_b4 with 78% similarity]
fuel/deploy/libvirt/vms/controller [moved from fuel/deploy/libvirt/vms/s1_b1 with 78% similarity]
fuel/deploy/libvirt/vms/fuel-master
fuel/deploy/libvirt/vms/s1_b2 [deleted file]
fuel/deploy/libvirt/vms/s1_b3 [deleted file]
fuel/deploy/libvirt/vms/s1_b5 [deleted file]
fuel/deploy/libvirt/vms/s1_b6 [deleted file]
fuel/deploy/setup_environment.py [new file with mode: 0644]
fuel/deploy/setup_vfuel.py [new file with mode: 0644]
fuel/deploy/setup_vms/apply_setup.sh [deleted file]
fuel/deploy/ssh_client.py [moved from fuel/deploy/cloud_deploy/ssh_client.py with 61% similarity]
fuel/deploy/transplant_fuel_settings.py [new file with mode: 0644]

diff --git a/fuel/deploy/README.rst b/fuel/deploy/README.rst
deleted file mode 100644 (file)
index f7b5711..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-**DEA libvirt deployment prototype**
-
-This is an example of how to deploy a libvirt KVM setup with a DEA
-YAML file.
-
-The file is created from an already deployed Fuel installation using
-the create_dea script and helper files which are to be present on the
-Fuel master and run from there.
-
-The install is kicked off from the host by running deploy.sh and
-providing the ISO file to deploy and (optionally) an DEA file name as
-an argument. If the DEA file is omitted the example one will be used
-instead.
-
-Pre-condition 1: The host needs to be Ubuntu 14.x
-
-Pre-condition 2: Necessary packages installed by running
-sudo genesis/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh
-
-Pre-condition 3: Example VM configuration deployed by running
-genesis/fuel/prototypes/libvirt/setup_vms/apply_setup.sh The VMs and
-networks to be setup are in genesis/fuel/prototypes/libvirt/examples:
-"vms" and "networks"
-sudo mkdir /mnt/images
-cd setup-vms
-sudo ./apply_setup.sh /mnt/images 50
-
-In order to run the automated install, it's just a matter of running
-genesis/fuel/prototypes/libvirt/deploy.sh <isofile> [<deafile>] The
-deafile will be optional, if not specified the example one in
-genesis/fuel/prototypes/libvirt/examples/libvirt_dea.yaml will be
-used.
-sudo ./deploy.sh ~/ISO/opnfv-P0000.iso ~/DEPLOY/deploy/dea.yaml
-
-Now either this will succeed (return code 0) or fail. I'll have a
-three hours safety catch to kill off things if something is hanging,
-may need to be adjusted for slow environments (see deploy.sh).
-
-All the steps above should be run with sudo.
-
-In principle the deploy.sh is assuming the example vm setup (one fuel,
-three controllers, two computes) and will always deploy with full HA
-and Ceilometer.
-
-TODO: Copy also  the deployment mode in my dea.yaml creation script
-genesis/fuel/prototypes/libvirt/create_dea/create_dea.sh so it's a
-real xerox of the running deploy.
diff --git a/fuel/deploy/README.txt b/fuel/deploy/README.txt
new file mode 100644 (file)
index 0000000..d392f8f
--- /dev/null
@@ -0,0 +1,71 @@
+
+======== How to prepare and run the OPNFV Autodeployment =======
+
+in fuel/deploy run these:
+
+
+
+--- Step.1 Install prerequisites
+
+sudo ./install-ubuntu-packages.sh
+
+
+
+
+
+
+--- Step.2-A If you want to deploy OPNFV cloud environment on top of KVM/Libvirt virtualization
+             run the following environment setup script
+
+sudo python setup_environment.py <storage_directory> <path_to_dha_file>
+
+Example:
+         sudo python setup_environment.py /mnt/images dha.yaml
+
+
+
+
+
+
+--- Step.2-B If you want to deploy OPNFV cloud environment on baremetal run the
+             following environment setup script
+
+sudo python setup_vfuel.py <storage_directory> <path_to_dha_file>
+
+Example:
+         sudo python setup_vfuel.py /mnt/images dha.yaml
+
+
+WARNING!:
+setup_vfuel.py adds the following snippet into /etc/network/interfaces.
+Make sure to replace the interface 'p1p1.20' in setup_vfuel.py with your actual outbound
+interface in order to provide network access to the Fuel master for DNS and NTP.
+
+iface vfuelnet inet static
+       bridge_ports em1
+       address 10.40.0.1
+       netmask 255.255.255.0
+       pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
+       pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
+       post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
+       post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
+
+
+
+
+
+
+--- Step.3 Start Autodeployment
+Make sure you use the right Deployment Environment Adapter and
+Deployment Hardware Adapter configuration files:
+
+       - for baremetal:  baremetal/dea.yaml   baremetal/dha.yaml
+
+       - for libvirt:    libvirt/dea.yaml   libvirt/dha.yaml
+
+
+sudo python deploy.py [-nf] <isofile> <deafile> <dhafile>
+
+Example:
+         sudo python deploy.py ~/ISO/opnfv.iso baremetal/dea.yaml baremetal/dha.yaml
+
diff --git a/fuel/deploy/__init__.py b/fuel/deploy/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/fuel/deploy/baremetal/dea.yaml b/fuel/deploy/baremetal/dea.yaml
new file mode 100644 (file)
index 0000000..eb3019c
--- /dev/null
@@ -0,0 +1,982 @@
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Tue May  5 15:33:07 UTC 2015
+comment: Test environment Ericsson Montreal
+environment_name: opnfv
+environment_mode: multinode
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+  interfaces: interface1
+  transformations: controller1
+  role: controller
+- id: 2
+  interfaces: interface1
+  transformations: compute1
+  role: compute
+fuel:
+  ADMIN_NETWORK:
+    ipaddress: 10.40.0.2
+    netmask: 255.255.255.0
+    dhcp_pool_start: 10.40.0.3
+    dhcp_pool_end: 10.40.0.254
+  DNS_UPSTREAM: 10.118.32.193
+  DNS_DOMAIN: opnfvericsson.ca
+  DNS_SEARCH: opnfvericsson.ca
+  FUEL_ACCESS:
+    user: admin
+    password: admin
+  HOSTNAME: opnfv
+  NTP1: 0.ca.pool.ntp.org
+  NTP2: 1.ca.pool.ntp.org
+  NTP3: 2.ca.pool.ntp.org
+interfaces:
+  interface1:
+    eth0:
+    - fuelweb_admin
+    eth2:
+    - public
+    - management
+    - storage
+    - private
+transformations:
+  controller1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-eth4
+    - action: add-port
+      bridge: br-eth4
+      name: eth4
+    - action: add-br
+      name: br-eth5
+    - action: add-port
+      bridge: br-eth5
+      name: eth5
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-storage
+      tags:
+      - 220
+      - 0
+      vlan_ids:
+      - 220
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-mgmt
+      tags:
+      - 320
+      - 0
+      vlan_ids:
+      - 320
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-ex
+      tags:
+      - 120
+      - 0
+      vlan_ids:
+      - 120
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+  compute1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-eth4
+    - action: add-port
+      bridge: br-eth4
+      name: eth4
+    - action: add-br
+      name: br-eth5
+    - action: add-port
+      bridge: br-eth5
+      name: eth5
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-storage
+      tags:
+      - 220
+      - 0
+      vlan_ids:
+      - 220
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-mgmt
+      tags:
+      - 320
+      - 0
+      vlan_ids:
+      - 320
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
+network:
+  networking_parameters:
+    base_mac: fa:16:3e:00:00:00
+    dns_nameservers:
+    - 10.118.32.193
+    - 8.8.8.8
+    floating_ranges:
+    - - 172.16.0.130
+      - 172.16.0.254
+    gre_id_range:
+    - 2
+    - 65535
+    internal_cidr: 192.168.111.0/24
+    internal_gateway: 192.168.111.1
+    net_l23_provider: ovs
+    segmentation_type: vlan
+    vlan_range:
+    - 2022
+    - 2023
+  networks:
+  - cidr: 172.16.0.0/24
+    gateway: 172.16.0.1
+    ip_ranges:
+    - - 172.16.0.2
+      - 172.16.0.126
+    meta:
+      assign_vip: true
+      cidr: 172.16.0.0/24
+      configurable: true
+      floating_range_var: floating_ranges
+      ip_range:
+      - 172.16.0.2
+      - 172.16.0.126
+      map_priority: 1
+      name: public
+      notation: ip_ranges
+      render_addr_mask: public
+      render_type: null
+      use_gateway: true
+      vlan_start: null
+    name: public
+    vlan_start: 120
+  - cidr: 192.168.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.0.2
+      - 192.168.0.254
+    meta:
+      assign_vip: true
+      cidr: 192.168.0.0/24
+      configurable: true
+      map_priority: 2
+      name: management
+      notation: cidr
+      render_addr_mask: internal
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 101
+    name: management
+    vlan_start: 320
+  - cidr: 192.168.1.0/24
+    gateway: null
+    ip_ranges:
+    - - 192.168.1.2
+      - 192.168.1.254
+    meta:
+      assign_vip: false
+      cidr: 192.168.1.0/24
+      configurable: true
+      map_priority: 2
+      name: storage
+      notation: cidr
+      render_addr_mask: storage
+      render_type: cidr
+      use_gateway: false
+      vlan_start: 102
+    name: storage
+    vlan_start: 220
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
+  - cidr: 10.40.0.0/24
+    gateway: null
+    ip_ranges:
+    - - 10.40.0.3
+      - 10.40.0.254
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 0
+      notation: ip_ranges
+      render_addr_mask: null
+      render_type: null
+      unmovable: true
+      use_gateway: true
+    name: fuelweb_admin
+    vlan_start: null
+settings:
+  editable:
+    access:
+      email:
+        description: Email address for Administrator
+        label: email
+        type: text
+        value: admin@localhost
+        weight: 40
+      metadata:
+        label: Access
+        weight: 10
+      password:
+        description: Password for Administrator
+        label: password
+        type: password
+        value: admin
+        weight: 20
+      tenant:
+        description: Tenant (project) name for Administrator
+        label: tenant
+        regex:
+          error: Invalid tenant name
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 30
+      user:
+        description: Username for Administrator
+        label: username
+        regex:
+          error: Invalid username
+          source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).*
+        type: text
+        value: admin
+        weight: 10
+    additional_components:
+      ceilometer:
+        description: If selected, Ceilometer component will be installed
+        label: Install Ceilometer
+        type: checkbox
+        value: false
+        weight: 40
+      heat:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 30
+      metadata:
+        label: Additional Components
+        weight: 20
+      murano:
+        description: If selected, Murano component will be installed
+        label: Install Murano
+        restrictions:
+        - cluster:net_provider != 'neutron'
+        type: checkbox
+        value: false
+        weight: 20
+      sahara:
+        description: If selected, Sahara component will be installed
+        label: Install Sahara
+        type: checkbox
+        value: false
+        weight: 10
+    common:
+      auth_key:
+        description: Public key(s) to include in authorized_keys on deployed nodes
+        label: Public Key
+        type: text
+        value: ''
+        weight: 70
+      auto_assign_floating_ip:
+        description: If selected, OpenStack will automatically assign a floating IP
+          to a new instance
+        label: Auto assign floating IP
+        restrictions:
+        - cluster:net_provider == 'neutron'
+        type: checkbox
+        value: false
+        weight: 40
+      compute_scheduler_driver:
+        label: Scheduler driver
+        type: radio
+        value: nova.scheduler.filter_scheduler.FilterScheduler
+        values:
+        - data: nova.scheduler.filter_scheduler.FilterScheduler
+          description: Currently the most advanced OpenStack scheduler. See the OpenStack
+            documentation for details.
+          label: Filter scheduler
+        - data: nova.scheduler.simple.SimpleScheduler
+          description: This is 'naive' scheduler which tries to find the least loaded
+            host
+          label: Simple scheduler
+        weight: 40
+      debug:
+        description: Debug logging mode provides more information, but requires more
+          disk space.
+        label: OpenStack debug logging
+        type: checkbox
+        value: false
+        weight: 20
+      disable_offload:
+        description: If set, generic segmentation offload (gso) and generic receive
+          offload (gro) on physical nics will be disabled. See ethtool man.
+        label: Disable generic offload on physical nics
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type
+            == 'gre'
+        type: checkbox
+        value: true
+        weight: 80
+      libvirt_type:
+        label: Hypervisor type
+        type: radio
+        value: kvm
+        values:
+        - data: kvm
+          description: Choose this type of hypervisor if you run OpenStack on hardware
+          label: KVM
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: qemu
+          description: Choose this type of hypervisor if you run OpenStack on virtual
+            hosts.
+          label: QEMU
+          restrictions:
+          - settings:common.libvirt_type.value == 'vcenter'
+        - data: vcenter
+          description: Choose this type of hypervisor if you run OpenStack in a vCenter
+            environment.
+          label: vCenter
+          restrictions:
+          - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider
+            == 'neutron'
+        weight: 30
+      metadata:
+        label: Common
+        weight: 30
+      nova_quota:
+        description: Quotas are used to limit CPU and memory usage for tenants. Enabling
+          quotas will increase load on the Nova database.
+        label: Nova quotas
+        type: checkbox
+        value: false
+        weight: 25
+      resume_guests_state_on_host_boot:
+        description: Whether to resume previous guests state when the host reboots.
+          If enabled, this option causes guests assigned to the host to resume their
+          previous state. If the guest was running a restart will be attempted when
+          nova-compute starts. If the guest was not running previously, a restart
+          will not be attempted.
+        label: Resume guests state on host boot
+        type: checkbox
+        value: true
+        weight: 60
+      use_cow_images:
+        description: For most cases you will want qcow format. If it's disabled, raw
+          image format will be used to run VMs. OpenStack with raw format currently
+          does not support snapshotting.
+        label: Use qcow format for images
+        type: checkbox
+        value: true
+        weight: 50
+    corosync:
+      group:
+        description: ''
+        label: Group
+        type: text
+        value: 226.94.1.1
+        weight: 10
+      metadata:
+        label: Corosync
+        restrictions:
+        - action: hide
+          condition: 'true'
+        weight: 50
+      port:
+        description: ''
+        label: Port
+        type: text
+        value: '12000'
+        weight: 20
+      verified:
+        description: Set True only if multicast is configured correctly on router.
+        label: Need to pass network verification.
+        type: checkbox
+        value: false
+        weight: 10
+    external_dns:
+      dns_list:
+        description: List of upstream DNS servers, separated by comma
+        label: DNS list
+        type: text
+        value: 10.118.32.193, 8.8.8.8
+        weight: 10
+      metadata:
+        label: Upstream DNS
+        weight: 90
+    external_ntp:
+      metadata:
+        label: Upstream NTP
+        weight: 100
+      ntp_list:
+        description: List of upstream NTP servers, separated by comma
+        label: NTP servers list
+        type: text
+        value: 0.pool.ntp.org, 1.pool.ntp.org
+        weight: 10
+    kernel_params:
+      kernel:
+        description: Default kernel parameters
+        label: Initial parameters
+        type: text
+        value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset
+        weight: 45
+      metadata:
+        label: Kernel parameters
+        weight: 40
+    neutron_mellanox:
+      metadata:
+        enabled: true
+        label: Mellanox Neutron components
+        toggleable: false
+        weight: 50
+      plugin:
+        label: Mellanox drivers and SR-IOV plugin
+        type: radio
+        value: disabled
+        values:
+        - data: disabled
+          description: If selected, Mellanox drivers, Neutron and Cinder plugin will
+            not be installed.
+          label: Mellanox drivers and plugins disabled
+          restrictions:
+          - settings:storage.iser.value == true
+        - data: drivers_only
+          description: If selected, Mellanox Ethernet drivers will be installed to
+            support networking over Mellanox NIC. Mellanox Neutron plugin will not
+            be installed.
+          label: Install only Mellanox drivers
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm'
+        - data: ethernet
+          description: If selected, both Mellanox Ethernet drivers and Mellanox network
+            acceleration (Neutron) plugin will be installed.
+          label: Install Mellanox drivers and SR-IOV plugin
+          restrictions:
+          - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider
+            == 'neutron' and networking_parameters:segmentation_type == 'vlan')
+        weight: 60
+      vf_num:
+        description: Note that one virtual function will be reserved to the storage
+          network, in case of choosing iSER.
+        label: Number of virtual NICs
+        restrictions:
+        - settings:neutron_mellanox.plugin.value != 'ethernet'
+        type: text
+        value: '16'
+        weight: 70
+    nsx_plugin:
+      connector_type:
+        description: Default network transport type to use
+        label: NSX connector type
+        type: select
+        value: stt
+        values:
+        - data: gre
+          label: GRE
+        - data: ipsec_gre
+          label: GRE over IPSec
+        - data: stt
+          label: STT
+        - data: ipsec_stt
+          label: STT over IPSec
+        - data: bridge
+          label: Bridge
+        weight: 80
+      l3_gw_service_uuid:
+        description: UUID for the default L3 gateway service to use with this cluster
+        label: L3 service UUID
+        regex:
+          error: Invalid L3 gateway service UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 50
+      metadata:
+        enabled: false
+        label: VMware NSX
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider
+            != 'nsx'
+        weight: 20
+      nsx_controllers:
+        description: One or more IPv4[:port] addresses of NSX controller node, separated
+          by comma (e.g. 10.40.30.2,192.168.110.254:443)
+        label: NSX controller endpoint
+        regex:
+          error: Invalid controller endpoints, specify valid IPv4[:port] pair
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$
+        type: text
+        value: ''
+        weight: 60
+      nsx_password:
+        description: Password for Administrator
+        label: NSX password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: ''
+        weight: 30
+      nsx_username:
+        description: NSX administrator's username
+        label: NSX username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      packages_url:
+        description: URL to NSX specific packages
+        label: URL to NSX bits
+        regex:
+          error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g.
+            http://10.20.0.2/nsx)
+          source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$
+        type: text
+        value: ''
+        weight: 70
+      replication_mode:
+        description: ''
+        label: NSX cluster has Service nodes
+        type: checkbox
+        value: true
+        weight: 90
+      transport_zone_uuid:
+        description: UUID of the pre-existing default NSX Transport zone
+        label: Transport zone UUID
+        regex:
+          error: Invalid transport zone UUID
+          source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}'
+        type: text
+        value: ''
+        weight: 40
+    provision:
+      metadata:
+        label: Provision
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 80
+      method:
+        description: Which provision method to use for this cluster.
+        label: Provision method
+        type: radio
+        value: cobbler
+        values:
+        - data: image
+          description: Copying pre-built images on a disk.
+          label: Image
+        - data: cobbler
+          description: Install from scratch using anaconda or debian-installer.
+          label: Classic (use anaconda or debian-installer)
+    public_network_assignment:
+      assign_to_all_nodes:
+        description: When disabled, public network will be assigned to controllers
+          and zabbix-server only
+        label: Assign public network to all nodes
+        type: checkbox
+        value: false
+        weight: 10
+      metadata:
+        label: Public network assignment
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'neutron'
+        weight: 50
+    storage:
+      ephemeral_ceph:
+        description: Configures Nova to store ephemeral volumes in RBD. This works
+          best if Ceph is enabled for volumes and images, too. Enables live migration
+          of all types of Ceph backed VMs (without this option, live migration will
+          only work with VMs launched from Cinder volumes).
+        label: Ceph RBD for ephemeral volumes (Nova)
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: checkbox
+        value: false
+        weight: 75
+      images_ceph:
+        description: Configures Glance to use the Ceph RBD backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: Ceph RBD for images (Glance)
+        type: checkbox
+        value: false
+        weight: 30
+      images_vcenter:
+        description: Configures Glance to use the vCenter/ESXi backend to store images.
+          If enabled, this option will prevent Swift from installing.
+        label: VMWare vCenter/ESXi datastore for images (Glance)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter'
+        type: checkbox
+        value: false
+        weight: 35
+      iser:
+        description: 'High performance block storage: Cinder volumes over iSER protocol
+          (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC,
+          and will use a dedicated virtual function for the storage network.'
+        label: iSER protocol for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value
+          != 'kvm'
+        type: checkbox
+        value: false
+        weight: 11
+      metadata:
+        label: Storage
+        weight: 60
+      objects_ceph:
+        description: Configures RadosGW front end for Ceph RBD. This exposes S3 and
+          Swift API Interfaces. If enabled, this option will prevent Swift from installing.
+        label: Ceph RadosGW for objects (Swift API)
+        restrictions:
+        - settings:storage.images_ceph.value == false
+        type: checkbox
+        value: false
+        weight: 80
+      osd_pool_size:
+        description: Configures the default number of object replicas in Ceph. This
+          number must be equal to or lower than the number of deployed 'Storage -
+          Ceph OSD' nodes.
+        label: Ceph object replication factor
+        regex:
+          error: Invalid number
+          source: ^[1-9]\d*$
+        restrictions:
+        - settings:common.libvirt_type.value == 'vcenter'
+        type: text
+        value: '2'
+        weight: 85
+      vc_datacenter:
+        description: Inventory path to a datacenter. If you want to use ESXi host
+          as datastore, it should be "ha-datacenter".
+        label: Datacenter name
+        regex:
+          error: Empty datacenter
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 65
+      vc_datastore:
+        description: Datastore associated with the datacenter.
+        label: Datastore name
+        regex:
+          error: Empty datastore
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 60
+      vc_host:
+        description: IP Address of vCenter/ESXi
+        label: vCenter/ESXi IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 45
+      vc_image_dir:
+        description: The name of the directory where the glance images will be stored
+          in the VMware datastore.
+        label: Datastore Images directory
+        regex:
+          error: Empty images directory
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: /openstack_glance
+        weight: 70
+      vc_password:
+        description: vCenter/ESXi admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: password
+        value: ''
+        weight: 55
+      vc_user:
+        description: vCenter/ESXi admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        restrictions:
+        - action: hide
+          condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value
+            != 'vcenter'
+        type: text
+        value: ''
+        weight: 50
+      volumes_ceph:
+        description: Configures Cinder to store volumes in Ceph RBD images.
+        label: Ceph RBD for volumes (Cinder)
+        restrictions:
+        - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value
+          == 'vcenter'
+        type: checkbox
+        value: false
+        weight: 20
+      volumes_lvm:
+        description: Requires at least one Storage - Cinder LVM node.
+        label: Cinder LVM over iSCSI for volumes
+        restrictions:
+        - settings:storage.volumes_ceph.value == true
+        type: checkbox
+        value: false
+        weight: 10
+      volumes_vmdk:
+        description: Configures Cinder to store volumes via VMware vCenter.
+        label: VMware vCenter for volumes (Cinder)
+        restrictions:
+        - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value
+          == true
+        type: checkbox
+        value: false
+        weight: 15
+    syslog:
+      metadata:
+        label: Syslog
+        weight: 50
+      syslog_port:
+        description: Remote syslog port
+        label: Port
+        regex:
+          error: Invalid Syslog port
+          source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+        type: text
+        value: '514'
+        weight: 20
+      syslog_server:
+        description: Remote syslog hostname
+        label: Hostname
+        type: text
+        value: ''
+        weight: 10
+      syslog_transport:
+        label: Syslog transport protocol
+        type: radio
+        value: tcp
+        values:
+        - data: udp
+          description: ''
+          label: UDP
+        - data: tcp
+          description: ''
+          label: TCP
+        weight: 30
+    vcenter:
+      cluster:
+        description: vCenter cluster name. If you have multiple clusters, use comma
+          to separate names
+        label: Cluster
+        regex:
+          error: Invalid cluster list
+          source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$
+        type: text
+        value: ''
+        weight: 40
+      datastore_regex:
+        description: The Datastore regexp setting specifies the data stores to use
+          with Compute. For example, "nas.*". If you want to use all available datastores,
+          leave this field blank
+        label: Datastore regexp
+        regex:
+          error: Invalid datastore regexp
+          source: ^(\S.*\S|\S|)$
+        type: text
+        value: ''
+        weight: 50
+      host_ip:
+        description: IP Address of vCenter
+        label: vCenter IP
+        regex:
+          error: Specify valid IPv4 address
+          source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$
+        type: text
+        value: ''
+        weight: 10
+      metadata:
+        label: vCenter
+        restrictions:
+        - action: hide
+          condition: settings:common.libvirt_type.value != 'vcenter'
+        weight: 20
+      use_vcenter:
+        description: ''
+        label: ''
+        type: hidden
+        value: true
+        weight: 5
+      vc_password:
+        description: vCenter admin password
+        label: Password
+        regex:
+          error: Empty password
+          source: \S
+        type: password
+        value: admin
+        weight: 30
+      vc_user:
+        description: vCenter admin username
+        label: Username
+        regex:
+          error: Empty username
+          source: \S
+        type: text
+        value: admin
+        weight: 20
+      vlan_interface:
+        description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
+          vmnic1). If empty "vmnic0" is used by default
+        label: ESXi VLAN interface
+        restrictions:
+        - action: hide
+          condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager
+            != 'VlanManager'
+        type: text
+        value: ''
+        weight: 60
+    zabbix:
+      metadata:
+        label: Zabbix Access
+        restrictions:
+        - action: hide
+          condition: not ('experimental' in version:feature_groups)
+        weight: 70
+      password:
+        description: Password for Zabbix Administrator
+        label: password
+        type: password
+        value: zabbix
+        weight: 20
+      username:
+        description: Username for Zabbix Administrator
+        label: username
+        type: text
+        value: admin
+        weight: 10
diff --git a/fuel/deploy/baremetal/dha.yaml b/fuel/deploy/baremetal/dha.yaml
new file mode 100644 (file)
index 0000000..6240f07
--- /dev/null
@@ -0,0 +1,53 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Mon May  4 09:03:46 UTC 2015
+comment: Test environment Ericsson Montreal
+
+# Adapter to use for this definition
+adapter: ipmi
+
+# Node list.
+# Mandatory properties are id and role.
+# The MAC address of the PXE boot interface used by Fuel does
+# not have to be defined.
+# All other properties are adapter specific.
+
+nodes:
+- id: 1
+  pxeMac: 14:58:D0:54:7A:28
+  ipmiIp: 10.118.32.205
+  ipmiUser: username
+  ipmiPass: password
+- id: 2
+  pxeMac: 14:58:D0:55:E2:E0
+  ipmiIp: 10.118.32.202
+  ipmiUser: username
+  ipmiPass: password
+# Adding the Fuel node as node id 3 which may not be correct - please
+# adjust as needed.
+- id: 3
+  libvirtName: vFuel
+  libvirtTemplate: vFuel
+  isFuel: yes
+  username: root
+  password: r00tme
+
+# Deployment power on strategy
+# all:      Turn on all nodes at once. There will be no correlation
+#           between the DHA and DEA node numbering. However, MAC
+#           addresses will still be used to select the node roles.
+# sequence: Turn on the nodes in sequence starting with the lowest order
+#           node and wait for the node to be detected by Fuel. Not until
+#           the node has been detected and assigned a role will the next
+#           node be turned on.
+powerOnStrategy: sequence
+
+# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
+# calling the DHA adapter function "dha_fuelCustomInstall()" with two
+# arguments: the node ID and the ISO file name to deploy. The custom install
+# function is then responsible for all the logic needed to boot the Fuel
+# master from the ISO, and returns when done.
+# Allowed values: true, false
+fuelCustomInstall: true
+
diff --git a/fuel/deploy/baremetal/vm/vFuel b/fuel/deploy/baremetal/vm/vFuel
new file mode 100644 (file)
index 0000000..1b4f4eb
--- /dev/null
@@ -0,0 +1,87 @@
+<domain type='kvm'>
+  <name>vFuel</name>
+  <memory unit='KiB'>8290304</memory>
+  <currentMemory unit='KiB'>8290304</currentMemory>
+  <vcpu placement='static'>2</vcpu>
+  <resource>
+    <partition>/machine</partition>
+  </resource>
+  <os>
+    <type arch='x86_64' machine='pc-i440fx-utopic'>hvm</type>
+    <boot dev='hd'/>
+    <boot dev='cdrom'/>
+    <bootmenu enable='no'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <cpu mode='custom' match='exact'>
+    <model fallback='allow'>SandyBridge</model>
+  </cpu>
+  <clock offset='utc'>
+    <timer name='rtc' tickpolicy='catchup'/>
+    <timer name='pit' tickpolicy='delay'/>
+    <timer name='hpet' present='no'/>
+  </clock>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <pm>
+    <suspend-to-mem enabled='no'/>
+    <suspend-to-disk enabled='no'/>
+  </pm>
+  <devices>
+    <emulator>/usr/bin/kvm</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='raw'/>
+      <source file='/mnt/images/vFuel.raw'/>
+      <target dev='vda' bus='virtio'/>
+    </disk>
+    <disk type='block' device='cdrom'>
+      <driver name='qemu' type='raw'/>
+      <target dev='hda' bus='ide'/>
+      <readonly/>
+    </disk>
+    <controller type='usb' index='0' model='ich9-ehci1'>
+    </controller>
+    <controller type='usb' index='0' model='ich9-uhci1'>
+      <master startport='0'/>
+    </controller>
+    <controller type='usb' index='0' model='ich9-uhci2'>
+      <master startport='2'/>
+    </controller>
+    <controller type='usb' index='0' model='ich9-uhci3'>
+      <master startport='4'/>
+    </controller>
+    <controller type='pci' index='0' model='pci-root'/>
+    <controller type='virtio-serial' index='0'>
+    </controller>
+    <controller type='ide' index='0'>
+    </controller>
+    <interface type='bridge'>
+      <source bridge='vfuelnet'/>
+      <model type='virtio'/>
+    </interface>
+    <serial type='pty'>
+      <target port='0'/>
+    </serial>
+    <console type='pty'>
+      <target type='serial' port='0'/>
+    </console>
+    <input type='mouse' bus='ps2'/>
+    <input type='keyboard' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='sv'>
+      <listen type='address' address='127.0.0.1'/>
+    </graphics>
+    <sound model='ich6'>
+    </sound>
+    <video>
+      <model type='cirrus' vram='9216' heads='1'/>
+    </video>
+    <memballoon model='virtio'>
+    </memballoon>
+  </devices>
+  <seclabel type='dynamic' model='apparmor' relabel='yes'/>
+</domain>
@@ -13,7 +13,7 @@ RO = common.RO
 exec_cmd = common.exec_cmd
 parse = common.parse
 err = common.err
-LOG = common.LOG
+log = common.log
 
 class ConfigureEnvironment(object):
 
@@ -21,7 +21,7 @@ class ConfigureEnvironment(object):
         self.env_id = None
         self.dea = dea
         self.yaml_config_dir = yaml_config_dir
-        self.env_name = dea.get_environment_name()
+        self.env_name = self.dea.get_property('environment_name')
         self.release_id = release_id
         self.node_id_roles_dict = node_id_roles_dict
         self.required_networks = []
@@ -35,23 +35,22 @@ class ConfigureEnvironment(object):
         return False
 
     def configure_environment(self):
-        LOG.debug('Configure environment\n')
+        log('Configure environment')
         if os.path.exists(self.yaml_config_dir):
-            LOG.debug('Deleting existing config directory %s\n'
-                  % self.yaml_config_dir)
+            log('Deleting existing config directory %s' % self.yaml_config_dir)
             shutil.rmtree(self.yaml_config_dir)
-        LOG.debug('Creating new config directory %s\n' % self.yaml_config_dir)
+        log('Creating new config directory %s' % self.yaml_config_dir)
         os.makedirs(self.yaml_config_dir)
 
-        LOG.debug('Creating environment %s release %s, mode ha, network-mode '
-              'neutron, net-segment-type vlan\n'
-              % (self.env_name, self.release_id))
-        exec_cmd('fuel env create --name %s --release %s --mode ha '
+        mode = self.dea.get_property('environment_mode')
+        log('Creating environment %s release %s, mode %s, network-mode neutron'
+            ', net-segment-type vlan' % (self.env_name, self.release_id, mode))
+        exec_cmd('fuel env create --name %s --release %s --mode %s '
                  '--network-mode neutron --net-segment-type vlan'
-                 % (self.env_name, self.release_id))
+                 % (self.env_name, self.release_id, mode))
 
         if not self.env_exists(self.env_name):
-            err("Failed to create environment %s\n" % self.env_name)
+            err('Failed to create environment %s' % self.env_name)
         self.config_settings()
         self.config_network()
         self.config_nodes()
similarity index 77%
rename from fuel/deploy/cloud_deploy/cloud/configure_network.py
rename to fuel/deploy/cloud/configure_network.py
index f4d6f87..295eb90 100644 (file)
@@ -10,7 +10,7 @@ exec_cmd = common.exec_cmd
 parse = common.parse
 err = common.err
 check_file_exists = common.check_file_exists
-LOG = common.LOG
+log = common.log
 
 class ConfigureNetwork(object):
 
@@ -21,29 +21,28 @@ class ConfigureNetwork(object):
         self.required_networks = []
 
     def download_network_config(self):
-        LOG.debug('Download network config for environment %s\n' % self.env_id)
+        log('Download network config for environment %s' % self.env_id)
         exec_cmd('fuel network --env %s --download --dir %s'
                  % (self.env_id, self.yaml_config_dir))
 
     def upload_network_config(self):
-        LOG.debug('Upload network config for environment %s\n' % self.env_id)
+        log('Upload network config for environment %s' % self.env_id)
         exec_cmd('fuel network --env %s --upload --dir %s'
                  % (self.env_id, self.yaml_config_dir))
 
     def config_network(self):
-        LOG.debug('Configure network\n')
+        log('Configure network')
         self.download_network_config()
         self.modify_network_config()
         self.upload_network_config()
 
     def modify_network_config(self):
-        LOG.debug('Modify network config for environment %s\n' % self.env_id)
-        network_yaml = (self.yaml_config_dir + '/network_%s.yaml'
-                        % self.env_id)
+        log('Modify network config for environment %s' % self.env_id)
+        network_yaml = ('%s/network_%s.yaml'
+                        % (self.yaml_config_dir, self.env_id))
         check_file_exists(network_yaml)
 
-        network_config = self.dea.get_networks()
-
+        network_config = self.dea.get_property('network')
 
         with io.open(network_yaml) as stream:
             network = yaml.load(stream)
diff --git a/fuel/deploy/cloud/configure_nodes.py b/fuel/deploy/cloud/configure_nodes.py
new file mode 100644 (file)
index 0000000..4d1315a
--- /dev/null
@@ -0,0 +1,104 @@
+import common
+import yaml
+import io
+import glob
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+log = common.log
+
+
+class ConfigureNodes(object):
+
+    def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea):
+        self.yaml_config_dir = yaml_config_dir
+        self.env_id = env_id
+        self.node_id_roles_dict = node_id_roles_dict
+        self.dea = dea
+
+    def config_nodes(self):
+        log('Configure nodes')
+        for node_id, roles_blade in self.node_id_roles_dict.iteritems():
+            exec_cmd('fuel node set --node-id %s --role %s --env %s'
+                     % (node_id, ','.join(roles_blade[0]), self.env_id))
+
+        self.download_deployment_config()
+        for node_id, roles_blade in self.node_id_roles_dict.iteritems():
+            self.download_interface_config(node_id)
+            self.modify_node_interface(node_id, roles_blade)
+            self.modify_node_network_schemes(node_id, roles_blade)
+            self.upload_interface_config(node_id)
+        self.upload_deployment_config()
+
+    def modify_node_network_schemes(self, node_id, roles_blade):
+        log('Modify node network transformations in environment %s'
+            % self.env_id)
+        type = self.dea.get_node_property(roles_blade[1], 'transformations')
+        transformations = self.dea.get_transformations(type)
+
+        for node_file in glob.glob('%s/deployment_%s/*_%s.yaml'
+                                   % (self.yaml_config_dir, self.env_id,
+                                      node_id)):
+            with io.open(node_file) as stream:
+               node = yaml.load(stream)
+
+            node['network_scheme']['transformations'] = transformations
+
+            with io.open(node_file, 'w') as stream:
+               yaml.dump(node, stream, default_flow_style=False)
+
+
+    def download_deployment_config(self):
+        log('Download deployment config for environment %s' % self.env_id)
+        exec_cmd('fuel deployment --env %s --default --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
+
+    def upload_deployment_config(self):
+        log('Upload deployment config for environment %s' % self.env_id)
+        exec_cmd('fuel deployment --env %s --upload --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
+
+    def download_interface_config(self, node_id):
+        log('Download interface config for node %s' % node_id)
+        exec_cmd('fuel node --env %s --node %s --network --download '
+                 '--dir %s' % (self.env_id, node_id, self.yaml_config_dir))
+
+    def upload_interface_config(self, node_id):
+        log('Upload interface config for node %s' % node_id)
+        exec_cmd('fuel node --env %s --node %s --network --upload '
+                 '--dir %s' % (self.env_id, node_id, self.yaml_config_dir))
+
+    def modify_node_interface(self, node_id, roles_blade):
+        log('Modify interface config for node %s' % node_id)
+        interface_yaml = ('%s/node_%s/interfaces.yaml'
+                          % (self.yaml_config_dir, node_id))
+        check_file_exists(interface_yaml)
+
+        with io.open(interface_yaml) as stream:
+            interfaces = yaml.load(stream)
+
+        net_name_id = {}
+        for interface in interfaces:
+            for network in interface['assigned_networks']:
+                 net_name_id[network['name']] = network['id']
+
+        type = self.dea.get_node_property(roles_blade[1], 'interfaces')
+        interface_config = self.dea.get_interfaces(type)
+
+        for interface in interfaces:
+            interface['assigned_networks'] = []
+            if interface['name'] in interface_config:
+                for net_name in interface_config[interface['name']]:
+                    net = {}
+                    net['id'] = net_name_id[net_name]
+                    net['name'] = net_name
+                    interface['assigned_networks'].append(net)
+
+        with io.open(interface_yaml, 'w') as stream:
+            yaml.dump(interfaces, stream, default_flow_style=False)
\ No newline at end of file
@@ -10,7 +10,7 @@ exec_cmd = common.exec_cmd
 parse = common.parse
 err = common.err
 check_file_exists = common.check_file_exists
-LOG = common.LOG
+log = common.log
 
 class ConfigureSettings(object):
 
@@ -20,28 +20,28 @@ class ConfigureSettings(object):
         self.dea = dea
 
     def download_settings(self):
-        LOG.debug('Download settings for environment %s\n' % self.env_id)
-        r, c = exec_cmd('fuel settings --env %s --download --dir %s'
-                        % (self.env_id, self.yaml_config_dir))
+        log('Download settings for environment %s' % self.env_id)
+        exec_cmd('fuel settings --env %s --download --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
 
     def upload_settings(self):
-        LOG.debug('Upload settings for environment %s\n' % self.env_id)
-        r, c = exec_cmd('fuel settings --env %s --upload --dir %s'
-                        % (self.env_id, self.yaml_config_dir))
+        log('Upload settings for environment %s' % self.env_id)
+        exec_cmd('fuel settings --env %s --upload --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
 
     def config_settings(self):
-        LOG.debug('Configure settings\n')
+        log('Configure settings')
         self.download_settings()
         self.modify_settings()
         self.upload_settings()
 
     def modify_settings(self):
-        LOG.debug('Modify settings for environment %s\n' % self.env_id)
-        settings_yaml = (self.yaml_config_dir + '/settings_%s.yaml'
-                         % self.env_id)
+        log('Modify settings for environment %s' % self.env_id)
+        settings_yaml = ('%s/settings_%s.yaml'
+                         % (self.yaml_config_dir, self.env_id))
         check_file_exists(settings_yaml)
 
-        settings = self.dea.get_settings()
+        settings = self.dea.get_property('settings')
 
         with io.open(settings_yaml, 'w') as stream:
             yaml.dump(settings, stream, default_flow_style=False)
diff --git a/fuel/deploy/cloud/deploy.py b/fuel/deploy/cloud/deploy.py
new file mode 100644 (file)
index 0000000..c8714f8
--- /dev/null
@@ -0,0 +1,206 @@
+import time
+import yaml
+import io
+import sys
+
+import common
+from dea import DeploymentEnvironmentAdapter
+from configure_environment import ConfigureEnvironment
+from deployment import Deployment
+
+YAML_CONF_DIR = '/var/lib/opnfv'
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+parse = common.parse
+err = common.err
+check_file_exists = common.check_file_exists
+log = common.log
+
+class Deploy(object):
+
+    def __init__(self, dea_file, macs_file):
+        self.dea = DeploymentEnvironmentAdapter(dea_file)
+        self.macs_file = macs_file
+        self.macs_per_blade = {}
+        self.blades = self.dea.get_node_ids()
+        self.node_ids_dict = {}
+        self.node_id_roles_dict = {}
+        self.supported_release = None
+        self.env_id = None
+        self.wanted_release = self.dea.get_wanted_release()
+
+    def cleanup_fuel_environments(self, env_list):
+        WAIT_LOOP = 60
+        SLEEP_TIME = 10
+        for env in env_list:
+            log('Deleting environment %s' % env[E['id']])
+            exec_cmd('fuel env --env %s --delete' % env[E['id']])
+        all_env_erased = False
+        for i in range(WAIT_LOOP):
+            env_list = parse(exec_cmd('fuel env list'))
+            if env_list:
+               time.sleep(SLEEP_TIME)
+            else:
+               all_env_erased = True
+               break
+        if not all_env_erased:
+            err('Could not erase these environments %s'
+                % [(env[E['id']], env[E['status']]) for env in env_list])
+
+    def cleanup_fuel_nodes(self, node_list):
+        for node in node_list:
+            if node[N['status']] == 'discover':
+                log('Deleting node %s' % node[N['id']])
+                exec_cmd('fuel node --node-id %s --delete-from-db'
+                         % node[N['id']])
+                exec_cmd('dockerctl shell cobbler cobbler system remove '
+                         '--name node-%s' % node[N['id']])
+
+    def check_previous_installation(self):
+        log('Check previous installation')
+        env_list = parse(exec_cmd('fuel env list'))
+        if env_list:
+            self.cleanup_fuel_environments(env_list)
+            node_list = parse(exec_cmd('fuel node list'))
+            if node_list:
+                self.cleanup_fuel_nodes(node_list)
+
+    def check_supported_release(self):
+        log('Check supported release: %s' % self.wanted_release)
+        release_list = parse(exec_cmd('fuel release -l'))
+        for release in release_list:
+            if release[R['name']] == self.wanted_release:
+                self.supported_release = release
+                break
+        if not self.supported_release:
+            err('This Fuel does not contain the following release: %s'
+                % self.wanted_release)
+
+    def check_prerequisites(self):
+        log('Check prerequisites')
+        self.check_supported_release()
+        self.check_previous_installation()
+
+    def get_mac_addresses(self):
+        with io.open(self.macs_file, 'r') as stream:
+            self.macs_per_blade = yaml.load(stream)
+
+    def find_mac_in_dict(self, mac):
+        for blade, mac_list in self.macs_per_blade.iteritems():
+            if mac in mac_list:
+                return blade
+
+    def all_blades_discovered(self):
+        for blade, node_id in self.node_ids_dict.iteritems():
+            if not node_id:
+                return False
+        return True
+
+    def not_discovered_blades_summary(self):
+        summary = ''
+        for blade, node_id in self.node_ids_dict.iteritems():
+            if not node_id:
+                summary += '\n[blade %s]' % blade
+        return summary
+
+    def node_discovery(self, node_list, discovered_macs):
+        for node in node_list:
+            if (node[N['status']] == 'discover' and
+                node[N['online']] == 'True' and
+                node[N['mac']] not in discovered_macs):
+                discovered_macs.append(node[N['mac']])
+                blade = self.find_mac_in_dict(node[N['mac']])
+                if blade:
+                    log('Blade %s discovered as Node %s with MAC %s'
+                        % (blade, node[N['id']], node[N['mac']]))
+                    self.node_ids_dict[blade] = node[N['id']]
+
+    def discovery_waiting_loop(self, discovered_macs):
+        WAIT_LOOP = 180
+        SLEEP_TIME = 10
+        all_discovered = False
+        for i in range(WAIT_LOOP):
+            node_list = parse(exec_cmd('fuel node list'))
+            if node_list:
+                self.node_discovery(node_list, discovered_macs)
+            if self.all_blades_discovered():
+                all_discovered = True
+                break
+            else:
+                time.sleep(SLEEP_TIME)
+        return all_discovered
+
+    def wait_for_discovered_blades(self):
+        log('Wait for discovered blades')
+        discovered_macs = []
+        for blade in self.blades:
+            self.node_ids_dict[blade] = None
+        all_discovered = self.discovery_waiting_loop(discovered_macs)
+        if not all_discovered:
+            err('Not all blades have been discovered: %s'
+                % self.not_discovered_blades_summary())
+
+    def assign_roles_to_cluster_node_ids(self):
+        self.node_id_roles_dict = {}
+        for blade, node_id in self.node_ids_dict.iteritems():
+            role_list = []
+            role = self.dea.get_node_role(blade)
+            if role == 'controller':
+                role_list.extend(['controller', 'mongo'])
+            elif role == 'compute':
+                role_list.extend(['compute'])
+            self.node_id_roles_dict[node_id] = (role_list, blade)
+
+    def configure_environment(self):
+        config_env = ConfigureEnvironment(self.dea, YAML_CONF_DIR,
+                                          self.supported_release[R['id']],
+                                          self.node_id_roles_dict)
+        config_env.configure_environment()
+        self.env_id = config_env.env_id
+
+    def deploy_cloud(self):
+        dep = Deployment(self.dea, YAML_CONF_DIR, self.env_id,
+                         self.node_id_roles_dict)
+        dep.deploy()
+
+    def deploy(self):
+        self.get_mac_addresses()
+        self.check_prerequisites()
+        self.wait_for_discovered_blades()
+        self.assign_roles_to_cluster_node_ids()
+        self.configure_environment()
+        self.deploy_cloud()
+
+def usage():
+    print '''
+    Usage:
+    python deploy.py <dea_file> <macs_file>
+
+    Example:
+            python deploy.py dea.yaml macs.yaml
+    '''
+
+def parse_arguments():
+    if len(sys.argv) != 3:
+        log('Incorrect number of arguments')
+        usage()
+        sys.exit(1)
+    dea_file = sys.argv[-2]
+    macs_file = sys.argv[-1]
+    check_file_exists(dea_file)
+    check_file_exists(macs_file)
+    return dea_file, macs_file
+
+def main():
+
+    dea_file, macs_file = parse_arguments()
+
+    deploy = Deploy(dea_file, macs_file)
+    deploy.deploy()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud/deployment.py b/fuel/deploy/cloud/deployment.py
new file mode 100644 (file)
index 0000000..cf56c36
--- /dev/null
@@ -0,0 +1,113 @@
+import common
+import os
+import shutil
+import glob
+import yaml
+import io
+import time
+
+N = common.N
+E = common.E
+R = common.R
+RO = common.RO
+exec_cmd = common.exec_cmd
+run_proc = common.run_proc
+parse = common.parse
+err = common.err
+log = common.log
+
+
+class Deployment(object):
+
+    def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict):
+        self.dea = dea
+        self.yaml_config_dir = yaml_config_dir
+        self.env_id = env_id
+        self.node_id_roles_dict = node_id_roles_dict
+
+    def download_deployment_info(self):
+        log('Download deployment info for environment %s' % self.env_id)
+        deployment_dir = '%s/deployment_%s' \
+                         % (self.yaml_config_dir, self.env_id)
+        if os.path.exists(deployment_dir):
+            shutil.rmtree(deployment_dir)
+        exec_cmd('fuel --env %s deployment --default --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
+
+    def upload_deployment_info(self):
+        log('Upload deployment info for environment %s' % self.env_id)
+        exec_cmd('fuel --env %s deployment --upload --dir %s'
+                 % (self.env_id, self.yaml_config_dir))
+
+    def config_opnfv(self):
+        log('Configure OPNFV settings on environment %s' % self.env_id)
+        opnfv_compute = self.dea.get_opnfv('compute')
+        opnfv_controller = self.dea.get_opnfv('controller')
+        self.download_deployment_info()
+        for node_file in glob.glob('%s/deployment_%s/*.yaml'
+                                   % (self.yaml_config_dir, self.env_id)):
+             with io.open(node_file) as stream:
+                 node = yaml.load(stream)
+             if node['role'] == 'compute':
+                node.update(opnfv_compute)
+             else:
+                node.update(opnfv_controller)
+             with io.open(node_file, 'w') as stream:
+                 yaml.dump(node, stream, default_flow_style=False)
+        self.upload_deployment_info()
+
+    def run_deploy(self):
+        WAIT_LOOP = 180
+        SLEEP_TIME = 60
+        LOG_FILE = 'cloud.log'
+
+        log('Starting deployment of environment %s' % self.env_id)
+        run_proc('fuel --env %s deploy-changes | strings | tee %s'
+                 % (self.env_id, LOG_FILE))
+
+        ready = False
+        for i in range(WAIT_LOOP):
+            env = parse(exec_cmd('fuel env --env %s' % self.env_id))
+            log('Environment status: %s' % env[0][E['status']])
+            r, _ = exec_cmd('tail -2 %s | head -1' % LOG_FILE, False)
+            if r:
+                log(r)
+            if env[0][E['status']] == 'operational':
+                ready = True
+                break
+            elif env[0][E['status']] == 'error':
+                break
+            else:
+                time.sleep(SLEEP_TIME)
+        exec_cmd('rm %s' % LOG_FILE)
+
+        if ready:
+            log('Environment %s successfully deployed' % self.env_id)
+        else:
+            err('Deployment failed, environment %s is not operational'
+                % self.env_id)
+
+    def verify_node_status(self):
+        node_list = parse(exec_cmd('fuel node list'))
+        failed_nodes = []
+        for node in node_list:
+            if node[N['status']] != 'ready':
+                failed_nodes.append((node[N['id']], node[N['status']]))
+
+        if failed_nodes:
+            summary = ''
+            for node, status in failed_nodes:
+                summary += '[node %s, status %s]\n' % (node, status)
+            err('Deployment failed: %s' % summary)
+
+    def health_check(self):
+        log('Now running sanity and smoke health checks')
+        exec_cmd('fuel health --env %s --check sanity,smoke --force'
+                 % self.env_id)
+        log('Health checks passed !')
+
+    def deploy(self):
+        self.config_opnfv()
+        self.run_deploy()
+        self.verify_node_status()
+        self.health_check()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/__init__.py b/fuel/deploy/cloud_deploy/__init__.py
deleted file mode 100644 (file)
index c274feb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/cloud/__init__.py b/fuel/deploy/cloud_deploy/cloud/__init__.py
deleted file mode 100644 (file)
index c274feb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/cloud/configure_nodes.py b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py
deleted file mode 100644 (file)
index a5e24a8..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-import common
-import yaml
-import io
-import glob
-
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-check_file_exists = common.check_file_exists
-LOG = common.LOG
-
-
-class ConfigureNodes(object):
-
-    def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea):
-        self.yaml_config_dir = yaml_config_dir
-        self.env_id = env_id
-        self.node_id_roles_dict = node_id_roles_dict
-        self.dea = dea
-
-    def config_nodes(self):
-        LOG.debug('Configure nodes\n')
-        for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems():
-            exec_cmd('fuel node set --node-id %s --role %s --env %s'
-                     % (node_id, ','.join(roles_shelf_blade[0]), self.env_id))
-
-        self.download_deployment_config()
-        self.modify_node_network_schemes()
-        self.upload_deployment_config()
-
-        for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems():
-            self.download_interface_config(node_id)
-            self.modify_node_interface(node_id)
-            self.upload_interface_config(node_id)
-
-    def modify_node_network_schemes(self):
-        LOG.debug('Modify node network schemes in environment %s\n' % self.env_id)
-        for node_file in glob.glob('%s/deployment_%s/*.yaml'
-                                   % (self.yaml_config_dir, self.env_id)):
-             check_file_exists(node_file)
-
-             if 'compute' in node_file:
-                 node_type = 'compute'
-             else:
-                 node_type = 'controller'
-
-             network_scheme = self.dea.get_network_scheme(node_type)
-
-             with io.open(node_file) as stream:
-                 node = yaml.load(stream)
-
-             node['network_scheme']['transformations'] = network_scheme
-
-             with io.open(node_file, 'w') as stream:
-                 yaml.dump(node, stream, default_flow_style=False)
-
-
-    def download_deployment_config(self):
-        LOG.debug('Download deployment config for environment %s\n' % self.env_id)
-        r, c = exec_cmd('fuel deployment --env %s --default --dir %s'
-                        % (self.env_id, self.yaml_config_dir))
-
-    def upload_deployment_config(self):
-        LOG.debug('Upload deployment config for environment %s\n' % self.env_id)
-        r, c = exec_cmd('fuel deployment --env %s --upload --dir %s'
-                        % (self.env_id, self.yaml_config_dir))
-
-    def download_interface_config(self, node_id):
-        LOG.debug('Download interface config for node %s\n' % node_id)
-        r, c = exec_cmd('fuel node --env %s --node %s --network --download '
-                        '--dir %s' % (self.env_id, node_id,
-                                      self.yaml_config_dir))
-
-    def upload_interface_config(self, node_id):
-        LOG.debug('Upload interface config for node %s\n' % node_id)
-        r, c = exec_cmd('fuel node --env %s --node %s --network --upload '
-                        '--dir %s' % (self.env_id, node_id,
-                                      self.yaml_config_dir))
-
-    def modify_node_interface(self, node_id):
-        LOG.debug('Modify interface config for node %s\n' % node_id)
-        interface_yaml = (self.yaml_config_dir + '/node_%s/interfaces.yaml'
-                          % node_id)
-
-        with io.open(interface_yaml) as stream:
-            interfaces = yaml.load(stream)
-
-        net_name_id = {}
-        for interface in interfaces:
-            for network in interface['assigned_networks']:
-                 net_name_id[network['name']] = network['id']
-
-        interface_config = self.dea.get_interfaces()
-
-        for interface in interfaces:
-            interface['assigned_networks'] = []
-            for net_name in interface_config[interface['name']]:
-                net = {}
-                net['id'] = net_name_id[net_name]
-                net['name'] = net_name
-                interface['assigned_networks'].append(net)
-
-        with io.open(interface_yaml, 'w') as stream:
-            yaml.dump(interfaces, stream, default_flow_style=False)
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/dea.py b/fuel/deploy/cloud_deploy/cloud/dea.py
deleted file mode 100644 (file)
index 295636a..0000000
+++ /dev/null
@@ -1,86 +0,0 @@
-import yaml
-import io
-
-class DeploymentEnvironmentAdapter(object):
-    def __init__(self):
-        self.dea_struct = None
-        self.blade_ids_per_shelves = {}
-        self.blades_per_shelves = {}
-        self.shelf_ids = []
-        self.info_per_shelves = {}
-        self.network_names = []
-
-    def parse_yaml(self, yaml_path):
-        with io.open(yaml_path) as yaml_file:
-            self.dea_struct = yaml.load(yaml_file)
-        self.collect_shelf_and_blade_info()
-        self.collect_shelf_info()
-        self.collect_network_names()
-
-    def get_no_of_blades(self):
-        no_of_blades = 0
-        for shelf in self.dea_struct['shelf']:
-            no_of_blades += len(shelf['blade'])
-        return no_of_blades
-
-    def collect_shelf_info(self):
-        self.info_per_shelves = {}
-        for shelf in self.dea_struct['shelf']:
-            self.info_per_shelves[shelf['id']] = shelf
-
-    def get_shelf_info(self, shelf):
-        return (self.info_per_shelves[shelf]['type'],
-                self.info_per_shelves[shelf]['mgmt_ip'],
-                self.info_per_shelves[shelf]['username'],
-                self.info_per_shelves[shelf]['password'])
-
-    def get_environment_name(self):
-        return self.dea_struct['name']
-
-    def get_shelf_ids(self):
-        return self.shelf_ids
-
-    def get_blade_ids_per_shelf(self, shelf_id):
-        return self.blade_ids_per_shelves[shelf_id]
-
-    def get_blade_ids_per_shelves(self):
-        return self.blade_ids_per_shelves
-
-    def collect_shelf_and_blade_info(self):
-        self.blade_ids_per_shelves = {}
-        self.blades_per_shelves = {}
-        self.shelf_ids = []
-        for shelf in self.dea_struct['shelf']:
-             self.shelf_ids.append(shelf['id'])
-             blade_ids = self.blade_ids_per_shelves[shelf['id']] = []
-             blades = self.blades_per_shelves[shelf['id']] = {}
-             for blade in shelf['blade']:
-                 blade_ids.append(blade['id'])
-                 blades[blade['id']] = blade
-
-    def has_role(self, role, shelf, blade):
-        blade = self.blades_per_shelves[shelf][blade]
-        if role == 'compute':
-            return True if 'roles' not in blade else False
-        return (True if 'roles' in blade and role in blade['roles']
-                else False)
-
-    def collect_network_names(self):
-        self.network_names = []
-        for network in self.dea_struct['networks']['networks']:
-            self.network_names.append(network['name'])
-
-    def get_networks(self):
-        return self.dea_struct['networks']
-
-    def get_network_names(self):
-        return self.network_names
-
-    def get_settings(self):
-        return self.dea_struct['settings']
-
-    def get_network_scheme(self, node_type):
-        return self.dea_struct[node_type]
-
-    def get_interfaces(self):
-        return self.dea_struct['interfaces']
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/deploy.py b/fuel/deploy/cloud_deploy/cloud/deploy.py
deleted file mode 100644 (file)
index ea33f8b..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-import time
-import yaml
-import io
-import os
-
-import common
-from dea import DeploymentEnvironmentAdapter
-from configure_environment import ConfigureEnvironment
-from deployment import Deployment
-
-SUPPORTED_RELEASE = 'Juno on CentOS 6.5'
-
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-check_file_exists = common.check_file_exists
-LOG = common.LOG
-
-class Deploy(object):
-
-    def __init__(self, yaml_config_dir):
-        self.supported_release = None
-        self.yaml_config_dir = yaml_config_dir
-        self.macs_per_shelf_dict = {}
-        self.node_ids_dict = {}
-        self.node_id_roles_dict = {}
-        self.env_id = None
-        self.shelf_blades_dict = {}
-
-    def cleanup_fuel_environments(self, env_list):
-        WAIT_LOOP = 60
-        SLEEP_TIME = 10
-        for env in env_list:
-            LOG.debug('Deleting environment %s\n' % env[E['id']])
-            exec_cmd('fuel env --env %s --delete' % env[E['id']])
-        all_env_erased = False
-        for i in range(WAIT_LOOP):
-            env_list = parse(exec_cmd('fuel env list'))
-            if env_list[0][0]:
-               time.sleep(SLEEP_TIME)
-            else:
-               all_env_erased = True
-               break
-        if not all_env_erased:
-            err('Could not erase these environments %s'
-                % [(env[E['id']], env[E['status']]) for env in env_list])
-
-    def cleanup_fuel_nodes(self, node_list):
-        for node in node_list:
-            if node[N['status']] == 'discover':
-                LOG.debug('Deleting node %s\n' % node[N['id']])
-                exec_cmd('fuel node --node-id %s --delete-from-db'
-                         % node[N['id']])
-                exec_cmd('cobbler system remove --name node-%s'
-                         % node[N['id']])
-
-    def check_previous_installation(self):
-        LOG.debug('Check previous installation\n')
-        env_list = parse(exec_cmd('fuel env list'))
-        if env_list[0][0]:
-            self.cleanup_fuel_environments(env_list)
-            node_list = parse(exec_cmd('fuel node list'))
-            if node_list[0][0]:
-                self.cleanup_fuel_nodes(node_list)
-
-    def check_supported_release(self):
-        LOG.debug('Check supported release: %s\n' % SUPPORTED_RELEASE)
-        release_list = parse(exec_cmd('fuel release -l'))
-        for release in release_list:
-            if release[R['name']] == SUPPORTED_RELEASE:
-                self.supported_release = release
-                break
-        if not self.supported_release:
-            err('This Fuel does not contain the following '
-                'release: %s\n' % SUPPORTED_RELEASE)
-
-    def check_prerequisites(self):
-        LOG.debug('Check prerequisites\n')
-        self.check_supported_release()
-        self.check_previous_installation()
-
-    def find_mac_in_dict(self, mac):
-        for shelf, blade_dict in self.macs_per_shelf_dict.iteritems():
-            for blade, mac_list in blade_dict.iteritems():
-                if mac in mac_list:
-                    return shelf, blade
-
-    def all_blades_discovered(self):
-        for shelf, blade_dict in self.node_ids_dict.iteritems():
-            for blade, node_id in blade_dict.iteritems():
-                if not node_id:
-                    return False
-        return True
-
-    def not_discovered_blades_summary(self):
-        summary = ''
-        for shelf, blade_dict in self.node_ids_dict.iteritems():
-            for blade, node_id in blade_dict.iteritems():
-                if not node_id:
-                    summary += '[shelf %s, blade %s]\n' % (shelf, blade)
-        return summary
-
-    def collect_blade_ids_per_shelves(self, dea):
-        self.shelf_blades_dict = dea.get_blade_ids_per_shelves()
-
-    def node_discovery(self, node_list, discovered_macs):
-        for node in node_list:
-            if (node[N['status']] == 'discover' and
-                node[N['online']] == 'True' and
-                node[N['mac']] not in discovered_macs):
-                discovered_macs.append(node[N['mac']])
-                shelf_blade = self.find_mac_in_dict(node[N['mac']])
-                if shelf_blade:
-                    self.node_ids_dict[shelf_blade[0]][shelf_blade[1]] = \
-                        node[N['id']]
-
-    def discovery_waiting_loop(self, discovered_macs):
-        WAIT_LOOP = 180
-        SLEEP_TIME = 10
-        all_discovered = False
-        for i in range(WAIT_LOOP):
-            node_list = parse(exec_cmd('fuel node list'))
-            if node_list[0][0]:
-                self.node_discovery(node_list, discovered_macs)
-            if self.all_blades_discovered():
-                all_discovered = True
-                break
-            else:
-                time.sleep(SLEEP_TIME)
-        return all_discovered
-
-    def wait_for_discovered_blades(self):
-        LOG.debug('Wait for discovered blades\n')
-        discovered_macs = []
-        for shelf, blade_list in self.shelf_blades_dict.iteritems():
-            self.node_ids_dict[shelf] = {}
-            for blade in blade_list:
-                self.node_ids_dict[shelf][blade] = None
-        all_discovered = self.discovery_waiting_loop(discovered_macs)
-        if not all_discovered:
-            err('Not all blades have been discovered: %s\n'
-                % self.not_discovered_blades_summary())
-
-    def get_mac_addresses(self, macs_yaml):
-        with io.open(macs_yaml, 'r') as stream:
-            self.macs_per_shelf_dict = yaml.load(stream)
-
-    def assign_roles_to_cluster_node_ids(self, dea):
-        self.node_id_roles_dict = {}
-        for shelf, blades_dict in self.node_ids_dict.iteritems():
-            for blade, node_id in blades_dict.iteritems():
-                role_list = []
-                if dea.has_role('controller', shelf, blade):
-                    role_list.extend(['controller', 'mongo'])
-                    if dea.has_role('cinder', shelf, blade):
-                        role_list.extend(['cinder'])
-                elif dea.has_role('compute', shelf, blade):
-                    role_list.extend(['compute'])
-                self.node_id_roles_dict[node_id] = (role_list, shelf, blade)
-
-    def configure_environment(self, dea):
-        config_env = ConfigureEnvironment(dea, self.yaml_config_dir,
-                                          self.supported_release[R['id']],
-                                          self.node_id_roles_dict)
-        config_env.configure_environment()
-        self.env_id = config_env.env_id
-
-    def deploy(self, dea):
-        dep = Deployment(dea, self.yaml_config_dir, self.env_id,
-                         self.node_id_roles_dict)
-        dep.deploy()
-
-
-def main():
-
-    base_dir = os.path.dirname(os.path.realpath(__file__))
-    dea_yaml = base_dir + '/dea.yaml'
-    check_file_exists(dea_yaml)
-    macs_yaml = base_dir + '/macs.yaml'
-    check_file_exists(macs_yaml)
-
-    yaml_config_dir = '/var/lib/opnfv/pre_deploy'
-
-    deploy = Deploy(yaml_config_dir)
-    dea = DeploymentEnvironmentAdapter()
-    dea.parse_yaml(dea_yaml)
-
-    deploy.get_mac_addresses(macs_yaml)
-
-    deploy.collect_blade_ids_per_shelves(dea)
-
-    deploy.check_prerequisites()
-
-    deploy.wait_for_discovered_blades()
-
-    deploy.assign_roles_to_cluster_node_ids(dea)
-
-    deploy.configure_environment(dea)
-
-    deploy.deploy(dea)
-
-
-if __name__ == '__main__':
-    main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/cloud/deployment.py b/fuel/deploy/cloud_deploy/cloud/deployment.py
deleted file mode 100644 (file)
index 831059b..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-import common
-import os
-import shutil
-import glob
-import yaml
-import io
-import time
-
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-run_proc = common.run_proc
-parse = common.parse
-err = common.err
-LOG = common.LOG
-
-
-class Deployment(object):
-
-    def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict):
-        self.dea = dea
-        self.env_name = dea.get_environment_name()
-        self.yaml_config_dir = yaml_config_dir
-        self.env_id = env_id
-        self.node_id_roles_dict = node_id_roles_dict
-        self.node_id_list = []
-        for node_id in self.node_id_roles_dict.iterkeys():
-            self.node_id_list.append(node_id)
-        self.node_id_list.sort()
-
-    def download_deployment_info(self):
-        LOG.debug('Download deployment info for environment %s\n' % self.env_id)
-        deployment_dir = self.yaml_config_dir + '/deployment_%s' % self.env_id
-        if os.path.exists(deployment_dir):
-            shutil.rmtree(deployment_dir)
-        r, c = exec_cmd('fuel --env %s deployment --default --dir %s'
-                        % (self.env_id, self.yaml_config_dir))
-        if c > 0:
-            err('Error: Could not download deployment info for env %s,'
-                ' reason: %s\n' % (self.env_id, r))
-
-    def upload_deployment_info(self):
-        LOG.debug('Upload deployment info for environment %s\n' % self.env_id)
-        r, c = exec_cmd('fuel --env %s deployment --upload --dir %s'
-                        % (self.env_id, self.yaml_config_dir))
-        if c > 0:
-            err('Error: Could not upload deployment info for env %s,'
-                ' reason: %s\n' % (self.env_id, r))
-
-    def pre_deploy(self):
-        LOG.debug('Running pre-deploy on environment %s\n' % self.env_name)
-        self.download_deployment_info()
-        opnfv = {'opnfv': {}}
-
-        for node_file in glob.glob('%s/deployment_%s/*.yaml'
-                                   % (self.yaml_config_dir, self.env_id)):
-             with io.open(node_file) as stream:
-                 node = yaml.load(stream)
-
-             if 'opnfv' not in node:
-                 node.update(opnfv)
-
-             with io.open(node_file, 'w') as stream:
-                 yaml.dump(node, stream, default_flow_style=False)
-        self.upload_deployment_info()
-
-
-    def deploy(self):
-        WAIT_LOOP = 180
-        SLEEP_TIME = 60
-
-        self.pre_deploy()
-
-        log_file = 'cloud.log'
-
-        LOG.debug('Starting deployment of environment %s\n' % self.env_name)
-        run_proc('fuel --env %s deploy-changes | strings | tee %s'
-                 % (self.env_id, log_file))
-
-        ready = False
-        for i in range(WAIT_LOOP):
-            env = parse(exec_cmd('fuel env --env %s' % self.env_id))
-            LOG.debug('Environment status: %s\n' % env[0][E['status']])
-            r, _ = exec_cmd('tail -2 %s | head -1' % log_file)
-            if r:
-                LOG.debug('%s\n' % r)
-            if env[0][E['status']] == 'operational':
-                ready = True
-                break
-            else:
-                time.sleep(SLEEP_TIME)
-        exec_cmd('rm %s' % log_file)
-
-        if ready:
-            LOG.debug('Environment %s successfully deployed\n' % self.env_name)
-        else:
-            err('Deployment failed, environment %s is not operational\n'
-                % self.env_name)
diff --git a/fuel/deploy/cloud_deploy/cloud_deploy.py b/fuel/deploy/cloud_deploy/cloud_deploy.py
deleted file mode 100644 (file)
index 4197519..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-import os
-import io
-import yaml
-
-from cloud import common
-from cloud.dea import DeploymentEnvironmentAdapter
-from hardware_adapters.dha import DeploymentHardwareAdapter
-from ssh_client import SSHClient
-
-exec_cmd = common.exec_cmd
-err = common.err
-check_file_exists = common.check_file_exists
-LOG = common.LOG
-
-class CloudDeploy(object):
-
-    def __init__(self, fuel_ip, fuel_username, fuel_password):
-        self.fuel_ip = fuel_ip
-        self.fuel_username = fuel_username
-        self.fuel_password = fuel_password
-        self.shelf_blades_dict = {}
-        self.macs_per_shelf_dict = {}
-
-    def copy_to_fuel_master(self, dir_path=None, file_path=None, target='~'):
-        if dir_path:
-            path = '-r ' + dir_path
-        elif file_path:
-            path = file_path
-        LOG.debug('Copying %s to Fuel Master %s' % (path, target))
-        if path:
-            exec_cmd('sshpass -p %s scp -o UserKnownHostsFile=/dev/null'
-                     ' -o StrictHostKeyChecking=no -o ConnectTimeout=15'
-                     ' %s %s@%s:%s'
-                     % (self.fuel_password, path, self.fuel_username,
-                        self.fuel_ip, target))
-
-    def run_cloud_deploy(self, deploy_dir, deploy_app):
-        LOG.debug('START CLOUD DEPLOYMENT')
-        ssh = SSHClient(self.fuel_ip, self.fuel_username, self.fuel_password)
-        ssh.open()
-        ssh.run('python %s/%s' % (deploy_dir, deploy_app))
-        ssh.close()
-
-    def power_off_blades(self, dea):
-        for shelf, blade_list in self.shelf_blades_dict.iteritems():
-            type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
-            dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
-            dha.power_off_blades(shelf, blade_list)
-
-    def power_on_blades(self, dea):
-        for shelf, blade_list in self.shelf_blades_dict.iteritems():
-            type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
-            dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
-            dha.power_on_blades(shelf, blade_list)
-
-    def set_boot_order(self, dea):
-        for shelf, blade_list in self.shelf_blades_dict.iteritems():
-            type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
-            dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
-            dha.set_boot_order_blades(shelf, blade_list)
-
-    def get_mac_addresses(self, dea, macs_yaml):
-        self.macs_per_shelf_dict = {}
-        for shelf, blade_list in self.shelf_blades_dict.iteritems():
-            type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
-            dha = DeploymentHardwareAdapter(type, mgmt_ip, username, password)
-            self.macs_per_shelf_dict[shelf] = dha.get_blades_mac_addresses(
-                shelf, blade_list)
-
-        with io.open(macs_yaml, 'w') as stream:
-            yaml.dump(self.macs_per_shelf_dict, stream,
-                      default_flow_style=False)
-
-    def collect_blade_ids_per_shelves(self, dea):
-        self.shelf_blades_dict = dea.get_blade_ids_per_shelves()
-
-
-
-def main():
-
-    fuel_ip = '10.20.0.2'
-    fuel_username = 'root'
-    fuel_password = 'r00tme'
-    deploy_dir = '~/cloud'
-
-    cloud = CloudDeploy(fuel_ip, fuel_username, fuel_password)
-
-    base_dir = os.path.dirname(os.path.realpath(__file__))
-    deployment_dir = base_dir + '/cloud'
-    macs_yaml = base_dir + '/macs.yaml'
-    dea_yaml = base_dir + '/dea.yaml'
-    check_file_exists(dea_yaml)
-
-    cloud.copy_to_fuel_master(dir_path=deployment_dir)
-    cloud.copy_to_fuel_master(file_path=dea_yaml, target=deploy_dir)
-
-    dea = DeploymentEnvironmentAdapter()
-    dea.parse_yaml(dea_yaml)
-
-    cloud.collect_blade_ids_per_shelves(dea)
-
-    cloud.power_off_blades(dea)
-
-    cloud.set_boot_order(dea)
-
-    cloud.power_on_blades(dea)
-
-    cloud.get_mac_addresses(dea, macs_yaml)
-    check_file_exists(dea_yaml)
-
-    cloud.copy_to_fuel_master(file_path=macs_yaml, target=deploy_dir)
-
-    cloud.run_cloud_deploy(deploy_dir, 'deploy.py')
-
-
-if __name__ == '__main__':
-    main()
\ No newline at end of file
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/__init__.py
deleted file mode 100644 (file)
index c274feb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/dha.py b/fuel/deploy/cloud_deploy/hardware_adapters/dha.py
deleted file mode 100644 (file)
index 2764aeb..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-from hp.hp_adapter import HpAdapter
-from libvirt.libvirt_adapter import LibvirtAdapter
-
-class DeploymentHardwareAdapter(object):
-    def __new__(cls, server_type, *args):
-        if cls is DeploymentHardwareAdapter:
-            if server_type == 'esxi':  return EsxiAdapter(*args)
-            if server_type == 'hp': return HpAdapter(*args)
-            if server_type == 'dell': return DellAdapter(*args)
-            if server_type == 'libvirt': return LibvirtAdapter(*args)
-        return super(DeploymentHardwareAdapter, cls).__new__(cls)
-
-
-class HardwareAdapter(object):
-
-    def power_off_blades(self, shelf, blade_list):
-        raise NotImplementedError
-
-    def power_off_blade(self, shelf, blade):
-        raise NotImplementedError
-
-    def power_on_blades(self, shelf, blade_list):
-        raise NotImplementedError
-
-    def power_on_blade(self, shelf, blade):
-        raise NotImplementedError
-
-    def power_cycle_blade(self):
-        raise NotImplementedError
-
-    def set_boot_order_blades(self, shelf, blade_list):
-        raise NotImplementedError
-
-    def set_boot_order_blade(self, shelf, blade):
-        raise NotImplementedError
-
-    def reset_to_factory_defaults(self):
-        raise NotImplementedError
-
-    def configure_networking(self):
-        raise NotImplementedError
-
-    def get_blade_mac_addresses(self, shelf, blade):
-        raise NotImplementedError
-
-    def get_hardware_info(self, shelf, blade):
-        raise NotImplementedError
-
-
-class EsxiAdapter(HardwareAdapter):
-
-    def __init__(self):
-        self.environment = {1: {1: {'mac': ['00:50:56:8c:05:85']},
-                                2: {'mac': ['00:50:56:8c:21:92']}}}
-
-    def get_blade_mac_addresses(self, shelf, blade):
-        return self.environment[shelf][blade]['mac']
-
-
-class DellAdapter(HardwareAdapter):
-    pass
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py
deleted file mode 100644 (file)
index c274feb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/hp_adapter.py
deleted file mode 100644 (file)
index 930d234..0000000
+++ /dev/null
@@ -1,288 +0,0 @@
-import re
-import time
-from netaddr import EUI, mac_unix
-from cloud import common
-from ssh_client import SSHClient
-
-LOG = common.LOG
-err = common.err
-
-S = {'bay': 0, 'ilo_name': 1, 'ilo_ip': 2, 'status': 3, 'power': 4,
-     'uid_partner': 5}
-
-class HpAdapter(object):
-
-    def __init__(self, mgmt_ip, username, password):
-        self.mgmt_ip = mgmt_ip
-        self.username = username
-        self.password = password
-
-    class mac_dhcp(mac_unix):
-        word_fmt = '%.2x'
-
-    def next_ip(self):
-        digit_list = self.mgmt_ip.split('.')
-        digit_list[3] = str(int(digit_list[3]) + 1)
-        self.mgmt_ip = '.'.join(digit_list)
-
-    def connect(self):
-        verified_ips = [self.mgmt_ip]
-        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-        try:
-            ssh.open()
-        except Exception:
-            self.next_ip()
-            verified_ips.append(self.mgmt_ip)
-            ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-            try:
-                ssh.open()
-            except Exception as e:
-                err('Could not connect to HP Onboard Administrator through '
-                    'these IPs: %s, reason: %s' % (verified_ips, e))
-
-        lines = self.clean_lines(ssh.execute('show oa status'))
-        for line in lines:
-            if 'Role:   Standby' in line:
-                ssh.close()
-                if self.mgmt_ip != verified_ips[0]:
-                    err('Can only talk to OA %s which is the standby OA\n'
-                        % self.mgmt_ip)
-                else:
-                    LOG.debug('%s is the standby OA, trying next OA\n'
-                              % self.mgmt_ip)
-                    self.next_ip()
-                    verified_ips.append(self.mgmt_ip)
-                    ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-                    try:
-                        ssh.open()
-                    except Exception as e:
-                        err('Could not connect to HP Onboard Administrator'
-                            ' through these IPs: %s, reason: %s'
-                            % (verified_ips, e))
-
-            elif 'Role:   Active' in line:
-                return ssh
-        err('Could not reach Active OA through these IPs %s' % verified_ips)
-
-    def get_blades_mac_addresses(self, shelf, blade_list):
-        macs_per_blade_dict = {}
-        LOG.debug('Getting MAC addresses for shelf %s, blades %s'
-                  % (shelf, blade_list))
-        ssh = self.connect()
-        for blade in blade_list:
-            lines = self.clean_lines(
-                ssh.execute('show server info %s' % blade))
-            left, right = self.find_mac(lines, shelf, blade)
-
-            left = EUI(left, dialect=self.mac_dhcp)
-            right = EUI(right, dialect=self.mac_dhcp)
-            macs_per_blade_dict[blade] = [str(left), str(right)]
-        ssh.close()
-        return macs_per_blade_dict
-
-    def find_mac(self, printout, shelf, blade):
-        left = False
-        right = False
-        for line in printout:
-            if ('No Server Blade Installed' in line or
-                'Invalid Arguments' in line):
-                err('Blade %d in shelf %d does not exist' % (blade, shelf))
-
-            seobj = re.search(r'LOM1:1-a\s+([0-9A-F:]+)', line, re.I)
-            if seobj:
-                left = seobj.group(1)
-            else:
-                seobj = re.search(r'LOM1:2-a\s+([0-9A-F:]+)', line, re.I)
-                if seobj:
-                    right = seobj.group(1)
-            if left and right:
-                return left, right
-
-    def get_hardware_info(self, shelf, blade=None):
-        ssh = self.connect()
-        if ssh and not blade:
-            ssh.close()
-            return 'HP'
-
-        lines = self.clean_lines(ssh.execute('show server info %s' % blade))
-        ssh.close()
-
-        match = r'Product Name:\s+(.+)\Z'
-        if not re.search(match, str(lines[:])):
-            LOG.debug('Blade %s in shelf %s does not exist\n' % (blade, shelf))
-            return False
-
-        for line in lines:
-            seobj = re.search(match, line)
-            if seobj:
-                return 'HP %s' % seobj.group(1)
-        return False
-
-    def power_off_blades(self, shelf, blade_list):
-        return self.set_state(shelf, 'locked', blade_list)
-
-    def power_on_blades(self, shelf, blade_list):
-        return self.set_state(shelf, 'unlocked', blade_list)
-
-    def set_boot_order_blades(self, shelf, blade_list):
-        return self.set_boot_order(shelf, blade_list=blade_list)
-
-    def parse(self, lines):
-        parsed_list = []
-        for l in lines[5:-2]:
-             parsed = []
-             cluttered = [e.strip() for e in l.split(' ')]
-             for p in cluttered:
-                 if p:
-                     parsed.append(p)
-             parsed_list.append(parsed)
-        return parsed_list
-
-    def set_state(self, shelf, state, blade_list):
-        if state not in ['locked', 'unlocked']:
-            LOG.debug('Incorrect state: %s' % state)
-            return None
-
-        LOG.debug('Setting state %s for blades %s in shelf %s'
-                  % (state, blade_list, shelf))
-
-        blade_list = sorted(blade_list)
-        ssh = self.connect()
-
-        LOG.debug('Check if blades are present')
-        server_list = self.parse(
-            self.clean_lines(ssh.execute('show server list')))
-
-        for blade in blade_list:
-            if server_list[S['status']] == 'Absent':
-                LOG.debug('Blade %s in shelf %s is missing. '
-                          'Set state %s not performed\n'
-                          % (blade, shelf, state))
-                blade_list.remove(blade)
-
-        bladelist = ','.join(blade_list)
-
-        # Use leading upper case on On/Off so it can be reused in match
-        force = ''
-        if state == 'locked':
-            powerstate = 'Off'
-            force = 'force'
-        else:
-            powerstate = 'On'
-        cmd = 'power%s server %s' % (powerstate, bladelist)
-        if force:
-            cmd += ' %s' % force
-
-        LOG.debug(cmd)
-        ssh.execute(cmd)
-
-        # Check that all blades reach the state which can take some time,
-        # so re-try a couple of times
-        LOG.debug('Check if state %s successfully set' % state)
-
-        WAIT_LOOP = 2
-        SLEEP_TIME = 3
-
-        set_blades = []
-
-        for i in range(WAIT_LOOP):
-            server_list = self.parse(
-                self.clean_lines(ssh.execute('show server list')))
-
-            for blade in blade_list:
-                for server in server_list:
-                    if (server[S['bay']] == blade and
-                        server[S['power']] == powerstate):
-                        set_blades.append(blade)
-                        break
-
-            all_set = set(blade_list) == set(set_blades)
-            if all_set:
-                break
-            else:
-                time.sleep(SLEEP_TIME)
-
-        ssh.close()
-
-        if all_set:
-            LOG.debug('State %s successfully set on blades %s in shelf %d'
-                      % (state, set_blades, shelf))
-            return True
-        else:
-            LOG.debug('Could not set state %s on blades %s in shelf %s\n'
-                      % (state, set(blade_list) - set(set_blades), shelf))
-        return False
-
-
-    def clean_lines(self, printout):
-        lines = []
-        for p in [l.strip() for l in printout.splitlines()]:
-            if p:
-                lines.append(p)
-        return lines
-
-
-    def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
-
-        boot_dict = {'Hard Drive': 'hdd',
-                     'PXE NIC': 'pxe',
-                     'CD-ROM': 'cd',
-                     'USB': 'usb',
-                     'Diskette Driver': 'disk'}
-
-        boot_options = [b for b in boot_dict.itervalues()]
-        diff = list(set(boot_dev_list) - set(boot_options))
-        if diff:
-            err('The following boot options %s are not valid' % diff)
-
-        blade_list = sorted(blade_list)
-        LOG.debug('Setting boot order %s for blades %s in shelf %s'
-                  % (boot_dev_list, blade_list, shelf))
-
-        ssh = self.connect()
-
-        LOG.debug('Check if blades are present')
-        server_list = self.parse(
-            self.clean_lines(ssh.execute('show server list')))
-
-        for blade in blade_list:
-            if server_list[S['status']] == 'Absent':
-                LOG.debug('Blade %s in shelf %s is missing. '
-                          'Change boot order %s not performed.\n'
-                          % (blade, shelf, boot_dev_list))
-                blade_list.remove(blade)
-
-        bladelist = ','.join(blade_list)
-
-        for boot_dev in reversed(boot_dev_list):
-            ssh.execute('set server boot first %s %s' % (boot_dev, bladelist))
-
-        LOG.debug('Check if boot order is successfully set')
-
-        success_list = []
-        boot_keys = [b for b in boot_dict.iterkeys()]
-        for blade in blade_list:
-            lines = self.clean_lines(ssh.execute('show server boot %s'
-                                                 % blade))
-            boot_order = lines[lines.index('IPL Devices (Boot Order):')+1:]
-            boot_list = []
-            success = False
-            for b in boot_order:
-                for k in boot_keys:
-                    if k in b:
-                        boot_list.append(boot_dict[k])
-                        break
-                if boot_list == boot_dev_list:
-                    success = True
-                    break
-
-            success_list.append(success)
-            if success:
-                LOG.debug('Boot order %s successfully set on blade %s in '
-                          'shelf %s\n' % (boot_dev_list, blade, shelf))
-            else:
-                LOG.debug('Failed to set boot order %s on blade %s in '
-                          'shelf %s\n' % (boot_dev_list, blade, shelf))
-
-        ssh.close()
-        return all(success_list)
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py
deleted file mode 100644 (file)
index c274feb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-__author__ = 'eszicse'
diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py
deleted file mode 100644 (file)
index d332e59..0000000
+++ /dev/null
@@ -1,153 +0,0 @@
-from lxml import etree
-from cloud import common
-from ssh_client import SSHClient
-
-exec_cmd = common.exec_cmd
-err = common.err
-LOG = common.LOG
-
-
-class LibvirtAdapter(object):
-
-    def __init__(self, mgmt_ip, username, password):
-        self.mgmt_ip = mgmt_ip
-        self.username = username
-        self.password = password
-        self.parser = etree.XMLParser(remove_blank_text=True)
-
-    def power_off_blades(self, shelf, blade_list):
-        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-        ssh.open()
-        for blade in blade_list:
-            LOG.debug('Power off blade %s in shelf %s' % (blade, shelf))
-            vm_name = 's%s_b%s' % (shelf, blade)
-            resp = ssh.execute('virsh destroy %s' % vm_name)
-            LOG.debug('response: %s' % resp)
-        ssh.close()
-
-    def power_on_blades(self, shelf, blade_list):
-        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-        ssh.open()
-        for blade in blade_list:
-            LOG.debug('Power on blade %s in shelf %s' % (blade, shelf))
-            vm_name = 's%s_b%s' % (shelf, blade)
-            resp = ssh.execute('virsh start %s' % vm_name)
-            LOG.debug('response: %s' % resp)
-        ssh.close()
-
-    def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
-        if not boot_dev_list:
-            boot_dev_list = ['network', 'hd']
-        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-        ssh.open()
-        temp_dir= ssh.execute('mktemp -d').strip()
-        for blade in blade_list:
-            LOG.debug('Set boot order %s on blade %s in shelf %s'
-                  % (boot_dev_list, blade, shelf))
-            vm_name = 's%s_b%s' % (shelf, blade)
-            resp = ssh.execute('virsh dumpxml %s' % vm_name)
-            xml_dump = etree.fromstring(resp, self.parser)
-            os = xml_dump.xpath('/domain/os')
-            for o in os:
-                for bootelem in ['boot', 'bootmenu']:
-                    boot = o.xpath(bootelem)
-                    for b in boot:
-                        b.getparent().remove(b)
-                for dev in boot_dev_list:
-                    b = etree.Element('boot')
-                    b.set('dev', dev)
-                    o.append(b)
-                bmenu = etree.Element('bootmenu')
-                bmenu.set('enable', 'no')
-                o.append(bmenu)
-            tree = etree.ElementTree(xml_dump)
-            xml_file = temp_dir + '/%s.xml' % vm_name
-            with open(xml_file, 'w') as f:
-                tree.write(f, pretty_print=True, xml_declaration=True)
-            ssh.execute('virsh define %s' % xml_file)
-        ssh.execute('rm -fr %s' % temp_dir)
-        ssh.close()
-
-    def get_blades_mac_addresses(self, shelf, blade_list):
-        LOG.debug('Get the MAC addresses of blades %s in shelf %s'
-              % (blade_list, shelf))
-        macs_per_blade_dict = {}
-        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-        ssh.open()
-        for blade in blade_list:
-            vm_name = 's%s_b%s' % (shelf, blade)
-            mac_list = macs_per_blade_dict[blade] = []
-            resp = ssh.execute('virsh dumpxml %s' % vm_name)
-            xml_dump = etree.fromstring(resp)
-            interfaces = xml_dump.xpath('/domain/devices/interface')
-            for interface in interfaces:
-                macs = interface.xpath('mac')
-                for mac in macs:
-                    mac_list.append(mac.get('address'))
-        ssh.close()
-        return macs_per_blade_dict
-
-    def load_image_file(self, shelf=None, blade=None, vm=None,
-                        image_path=None):
-        if shelf and blade:
-            vm_name = 's%s_b%s' % (shelf, blade)
-        else:
-            vm_name = vm
-
-        LOG.debug('Load media file %s into %s '
-                  % (image_path, 'vm %s' % vm if vm else 'blade %s in shelf %s'
-                                                         % (shelf, blade)))
-
-        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-        ssh.open()
-        temp_dir= ssh.execute('mktemp -d').strip()
-        resp = ssh.execute('virsh dumpxml %s' % vm_name)
-        xml_dump = etree.fromstring(resp)
-
-        disks = xml_dump.xpath('/domain/devices/disk')
-        for disk in disks:
-            if disk.get('device') == 'cdrom':
-                disk.set('type', 'file')
-                sources = disk.xpath('source')
-                for source in sources:
-                    disk.remove(source)
-                source = etree.SubElement(disk, 'source')
-                source.set('file', image_path)
-        tree = etree.ElementTree(xml_dump)
-        xml_file = temp_dir + '/%s.xml' % vm_name
-        with open(xml_file, 'w') as f:
-            tree.write(f, pretty_print=True, xml_declaration=True)
-        ssh.execute('virsh define %s' % xml_file)
-        ssh.execute('rm -fr %s' % temp_dir)
-        ssh.close()
-
-    def eject_image_file(self, shelf=None, blade=None, vm=None):
-        if shelf and blade:
-            vm_name = 's%s_b%s' % (shelf, blade)
-        else:
-            vm_name = vm
-
-        LOG.debug('Eject media file from %s '
-                  % 'vm %s' % vm if vm else 'blade %s in shelf %s'
-                                            % (shelf, blade))
-
-        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
-        ssh.open()
-        temp_dir= ssh.execute('mktemp -d').strip()
-        resp = ssh.execute('virsh dumpxml %s' % vm_name)
-        xml_dump = etree.fromstring(resp)
-
-        disks = xml_dump.xpath('/domain/devices/disk')
-        for disk in disks:
-            if disk.get('device') == 'cdrom':
-                disk.set('type', 'block')
-                sources = disk.xpath('source')
-                for source in sources:
-                    disk.remove(source)
-        tree = etree.ElementTree(xml_dump)
-        xml_file = temp_dir + '/%s.xml' % vm_name
-        with open(xml_file, 'w') as f:
-            tree.write(f, pretty_print=True, xml_declaration=True)
-        ssh.execute('virsh define %s' % xml_file)
-        ssh.execute('rm -fr %s' % temp_dir)
-        ssh.close()
similarity index 59%
rename from fuel/deploy/cloud_deploy/cloud/common.py
rename to fuel/deploy/common.py
index 365f6fb..6dbda67 100644 (file)
@@ -20,12 +20,19 @@ out_handler = logging.FileHandler('autodeploy.log', mode='w')
 out_handler.setFormatter(formatter)
 LOG.addHandler(out_handler)
 
-def exec_cmd(cmd):
+def exec_cmd(cmd, check=True):
     process = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                shell=True)
-    return process.communicate()[0], process.returncode
+    response = process.communicate()[0].strip()
+    return_code = process.returncode
+    if check:
+        if return_code > 0:
+            err(response)
+        else:
+            return response
+    return response, return_code
 
 def run_proc(cmd):
     process = subprocess.Popen(cmd,
@@ -34,18 +41,42 @@ def run_proc(cmd):
                                shell=True)
     return process
 
-def parse(printout, *args):
+def parse(printout):
     parsed_list = []
-    lines = printout[0].splitlines()
+    lines = printout.splitlines()
     for l in lines[2:]:
          parsed = [e.strip() for e in l.split('|')]
          parsed_list.append(parsed)
     return parsed_list
 
-def err(error_message):
-    LOG.error(error_message)
+def clean(lines):
+    parsed_list = []
+    parsed = []
+    for l in lines.strip().splitlines():
+        parsed = []
+        cluttered = [e.strip() for e in l.split(' ')]
+        for p in cluttered:
+            if p:
+                parsed.append(p)
+        parsed_list.append(parsed)
+    return parsed if len(parsed_list) == 1 else parsed_list
+
+def err(message):
+    LOG.error('%s\n' % message)
     sys.exit(1)
 
 def check_file_exists(file_path):
     if not os.path.isfile(file_path):
         err('ERROR: File %s not found\n' % file_path)
+
+def check_dir_exists(dir_path):
+    if not os.path.isdir(dir_path):
+        err('ERROR: Directory %s not found\n' % dir_path)
+
+def check_if_root():
+    r = exec_cmd('whoami')
+    if r != 'root':
+        err('You need to be root to run this application')
+
+def log(message):
+    LOG.debug('%s\n' % message)
diff --git a/fuel/deploy/dea.py b/fuel/deploy/dea.py
new file mode 100644 (file)
index 0000000..8066b6a
--- /dev/null
@@ -0,0 +1,80 @@
+import yaml
+import io
+import netaddr
+
+class DeploymentEnvironmentAdapter(object):
+    def __init__(self, yaml_path):
+        self.dea_struct = None
+        self.parse_yaml(yaml_path)
+        self.network_names = []
+        self.collect_network_names()
+
+    def modify_ip(self, ip_addr, index, val):
+        ip_str = str(netaddr.IPAddress(ip_addr))
+        decimal_list = map(int, ip_str.split('.'))
+        decimal_list[index] = val
+        return '.'.join(map(str, decimal_list))
+
+    def parse_yaml(self, yaml_path):
+        with io.open(yaml_path) as yaml_file:
+            self.dea_struct = yaml.load(yaml_file)
+
+    def get_fuel_config(self):
+        return self.dea_struct['fuel']
+
+    def get_fuel_ip(self):
+        fuel_conf = self.get_fuel_config()
+        return fuel_conf['ADMIN_NETWORK']['ipaddress']
+
+    def get_fuel_netmask(self):
+        fuel_conf = self.get_fuel_config()
+        return fuel_conf['ADMIN_NETWORK']['netmask']
+
+    def get_fuel_gateway(self):
+        ip = self.get_fuel_ip()
+        return self.modify_ip(ip, 3, 1)
+
+    def get_fuel_hostname(self):
+        fuel_conf = self.get_fuel_config()
+        return fuel_conf['HOSTNAME']
+
+    def get_fuel_dns(self):
+        fuel_conf = self.get_fuel_config()
+        return fuel_conf['DNS_UPSTREAM']
+
+    def get_node_property(self, node_id, property_name):
+        for node in self.dea_struct['nodes']:
+            if node['id'] == node_id and property_name in node:
+                return node[property_name]
+
+    def get_node_role(self, node_id):
+        return self.get_node_property(node_id, 'role')
+
+    def get_node_ids(self):
+        node_ids = []
+        for node in self.dea_struct['nodes']:
+            node_ids.append(node['id'])
+        return node_ids
+
+    def get_property(self, property_name):
+        return self.dea_struct[property_name]
+
+    def collect_network_names(self):
+        self.network_names = []
+        for network in self.dea_struct['network']['networks']:
+            self.network_names.append(network['name'])
+
+    def get_network_names(self):
+        return self.network_names
+
+    def get_interfaces(self, type):
+        return self.dea_struct['interfaces'][type]
+
+    def get_transformations(self, type):
+        return self.dea_struct['transformations'][type]
+
+    def get_opnfv(self, role):
+        return {'opnfv': self.dea_struct['opnfv'][role]}
+
+    def get_wanted_release(self):
+        return self.dea_struct['wanted_release']
\ No newline at end of file
diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py
new file mode 100644 (file)
index 0000000..9d1a3d2
--- /dev/null
@@ -0,0 +1,199 @@
+import sys
+import os
+import shutil
+import io
+import re
+import netaddr
+
+from dea import DeploymentEnvironmentAdapter
+from dha import DeploymentHardwareAdapter
+from install_fuel_master import InstallFuelMaster
+from deploy_env import CloudDeploy
+import common
+
+log = common.log
+exec_cmd = common.exec_cmd
+err = common.err
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+FUEL_VM = 'fuel'
+TMP_DIR = '%s/fueltmp' % os.getenv('HOME')
+PATCH_DIR = 'fuel_patch'
+WORK_DIR = 'deploy'
+
+class cd:
+    def __init__(self, new_path):
+        self.new_path = os.path.expanduser(new_path)
+
+    def __enter__(self):
+        self.saved_path = os.getcwd()
+        os.chdir(self.new_path)
+
+    def __exit__(self, etype, value, traceback):
+        os.chdir(self.saved_path)
+
+
+class AutoDeploy(object):
+
+    def __init__(self, without_fuel, iso_file, dea_file, dha_file):
+        self.without_fuel = without_fuel
+        self.iso_file = iso_file
+        self.dea_file = dea_file
+        self.dha_file = dha_file
+        self.dea = DeploymentEnvironmentAdapter(dea_file)
+        self.dha = DeploymentHardwareAdapter(dha_file)
+        self.fuel_conf = {}
+        self.fuel_node_id = self.dha.get_fuel_node_id()
+        self.fuel_custom = self.dha.use_fuel_custom_install()
+        self.fuel_username, self.fuel_password = self.dha.get_fuel_access()
+
+    def setup_dir(self, dir):
+        self.cleanup_dir(dir)
+        os.makedirs(dir)
+
+    def cleanup_dir(self, dir):
+        if os.path.isdir(dir):
+            shutil.rmtree(dir)
+
+    def power_off_blades(self):
+        node_ids = self.dha.get_all_node_ids()
+        node_ids = list(set(node_ids) - set([self.fuel_node_id]))
+        for node_id in node_ids:
+            self.dha.node_power_off(node_id)
+
+    def modify_ip(self, ip_addr, index, val):
+        ip_str = str(netaddr.IPAddress(ip_addr))
+        decimal_list = map(int, ip_str.split('.'))
+        decimal_list[index] = val
+        return '.'.join(map(str, decimal_list))
+
+    def collect_fuel_info(self):
+        self.fuel_conf['ip'] = self.dea.get_fuel_ip()
+        self.fuel_conf['gw'] = self.dea.get_fuel_gateway()
+        self.fuel_conf['dns1'] = self.dea.get_fuel_dns()
+        self.fuel_conf['netmask'] = self.dea.get_fuel_netmask()
+        self.fuel_conf['hostname'] = self.dea.get_fuel_hostname()
+        self.fuel_conf['showmenu'] = 'yes'
+
+    def install_fuel_master(self):
+        if self.without_fuel:
+            log('Not Installing Fuel Master')
+            return
+        log('Install Fuel Master')
+        new_iso = '%s/deploy-%s' % (TMP_DIR, os.path.basename(self.iso_file))
+        self.patch_iso(new_iso)
+        self.iso_file = new_iso
+        self.install_iso()
+
+    def install_iso(self):
+        fuel = InstallFuelMaster(self.dea_file, self.dha_file,
+                                 self.fuel_conf['ip'], self.fuel_username,
+                                 self.fuel_password, self.fuel_node_id,
+                                 self.iso_file, WORK_DIR)
+        if self.fuel_custom:
+            log('Custom Fuel install')
+            fuel.custom_install()
+        else:
+            log('Ordinary Fuel install')
+            fuel.install()
+
+    def patch_iso(self, new_iso):
+        tmp_orig_dir = '%s/origiso' % TMP_DIR
+        tmp_new_dir = '%s/newiso' % TMP_DIR
+        self.copy(tmp_orig_dir, tmp_new_dir)
+        self.patch(tmp_new_dir, new_iso)
+
+    def copy(self, tmp_orig_dir, tmp_new_dir):
+        log('Copying...')
+        self.setup_dir(tmp_orig_dir)
+        self.setup_dir(tmp_new_dir)
+        exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
+        with cd(tmp_orig_dir):
+            exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
+        with cd(tmp_new_dir):
+            exec_cmd('fusermount -u %s' % tmp_orig_dir)
+        shutil.rmtree(tmp_orig_dir)
+        exec_cmd('chmod -R 755 %s' % tmp_new_dir)
+
+    def patch(self, tmp_new_dir, new_iso):
+        log('Patching...')
+        patch_dir = '%s/%s' % (os.getcwd(), PATCH_DIR)
+        ks_path = '%s/ks.cfg.patch' % patch_dir
+
+        with cd(tmp_new_dir):
+            exec_cmd('cat %s | patch -p0' % ks_path)
+            shutil.rmtree('.rr_moved')
+            isolinux = 'isolinux/isolinux.cfg'
+            log('isolinux.cfg before: %s'
+                % exec_cmd('grep netmask %s' % isolinux))
+            self.update_fuel_isolinux(isolinux)
+            log('isolinux.cfg after: %s'
+                % exec_cmd('grep netmask %s' % isolinux))
+
+            iso_linux_bin = 'isolinux/isolinux.bin'
+            exec_cmd('mkisofs -quiet -r -J -R -b %s '
+                     '-no-emul-boot -boot-load-size 4 '
+                     '-boot-info-table -hide-rr-moved '
+                     '-x "lost+found:" -o %s .'
+                     % (iso_linux_bin, new_iso))
+
+    def update_fuel_isolinux(self, file):
+        with io.open(file) as f:
+            data = f.read()
+        for key, val in self.fuel_conf.iteritems():
+            pattern = r'%s=[^ ]\S+' % key
+            replace = '%s=%s' % (key, val)
+            data = re.sub(pattern, replace, data)
+        with io.open(file, 'w') as f:
+            f.write(data)
+
+    def deploy_env(self):
+        dep = CloudDeploy(self.dha, self.fuel_conf['ip'], self.fuel_username,
+                          self.fuel_password, self.dea_file, WORK_DIR)
+        dep.deploy()
+
+    def deploy(self):
+        check_if_root()
+        self.setup_dir(TMP_DIR)
+        self.collect_fuel_info()
+        self.power_off_blades()
+        self.install_fuel_master()
+        self.cleanup_dir(TMP_DIR)
+        self.deploy_env()
+
+def usage():
+    print '''
+    Usage:
+    python deploy.py [-nf] <isofile> <deafile> <dhafile>
+
+    Optional arguments:
+      -nf   Do not install Fuel master
+    '''
+
+def parse_arguments():
+    if (len(sys.argv) < 4 or len(sys.argv) > 5
+        or (len(sys.argv) == 5 and sys.argv[1] != '-nf')):
+        log('Incorrect number of arguments')
+        usage()
+        sys.exit(1)
+    without_fuel = False
+    if len(sys.argv) == 5 and sys.argv[1] == '-nf':
+        without_fuel = True
+    iso_file = sys.argv[-3]
+    dea_file = sys.argv[-2]
+    dha_file = sys.argv[-1]
+    check_file_exists(iso_file)
+    check_file_exists(dea_file)
+    check_file_exists(dha_file)
+    return (without_fuel, iso_file, dea_file, dha_file)
+
+def main():
+
+    without_fuel, iso_file, dea_file, dha_file = parse_arguments()
+
+    d = AutoDeploy(without_fuel, iso_file, dea_file, dha_file)
+    d.deploy()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/deploy.sh b/fuel/deploy/deploy.sh
deleted file mode 100755 (executable)
index 916125e..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Setup locations
-topdir=$(cd `dirname $0`; pwd)
-functions=${topdir}/functions
-tmpdir=$HOME/fueltmp
-deployiso=${tmpdir}/deploy.iso
-cloud_deploy=$(cd ${topdir}/cloud_deploy; pwd)
-
-# Define common functions
-. ${functions}/common.sh
-
-exit_handler() {
-  # Remove safety catch
-  kill -9 `ps -p $killpid -o pid --no-headers` \
-        `ps --ppid $killpid -o pid --no-headers`\
-  > /dev/null 2>&1
-}
-
-# Set maximum allowed deploy time (default three hours)
-MAXDEPLOYTIME=${MAXDEPLOYTIME-3h}
-
-####### MAIN ########
-
-if [ "`whoami`" != "root" ]; then
-  error_exit "You need be root to run this script"
-fi
-
-if [ $# -eq 0 -o $# -gt 2 ]; then
-  error_exit "Argument error"
-fi
-
-# Setup tmpdir
-if [ -d $tmpdir ]; then
-  rm -Rf $tmpdir || error_exit "Could not remove tmpdir $tmpdir"
-fi
-
-mkdir $tmpdir || error_exit "Could not create tmpdir $tmpdir"  
-
-if [ ! -f $1 ]; then
-  error_exit "Could not find ISO file $1"
-else
-  isofile=$(cd `dirname $1`; echo `pwd`/`basename $1`)
-fi
-
-# If no DEA specified, use the example one
-if [ $# -eq 1 ]; then
-  deafile=${topdir}/dea.yaml
-else
-  deafile=$(cd `dirname $2`; echo `pwd`/`basename $2`)
-fi
-cp ${deafile} ${cloud_deploy}/
-
-if [ ! -f $deafile ]; then
-  error-exit "Could not find DEA file $deafile"
-fi
-
-# Enable safety catch
-echo "Enabling auto-kill if deployment exceeds $MAXDEPLOYTIME"
-(sleep $MAXDEPLOYTIME; echo "Auto-kill of deploy after a timeout of $MAXDEPLOYTIME"; kill $$) &
-killpid=$!
-
-# Enable exit handler
-trap exit_handler exit
-
-# Stop all VMs
-for node in `ls libvirt/vms`
-do
-  virsh destroy $node >/dev/null 2>&1
-done
-
-
-# Install the Fuel master
-# (Convert to functions at later stage)
-echo "Patching iso file"
-${functions}/patch-iso.sh $isofile $deployiso $tmpdir || error_exit "Failed to patch ISO"
-# Swap isofiles from now on
-isofile=$deployiso
-. ${functions}/install_iso.sh
-
-python ${cloud_deploy}/cloud_deploy.py
-
-echo "Waiting for five minutes for deploy to stabilize"
-sleep 5m
-
-echo "Verifying node status after deployment"
-# Any node with non-ready status?
-ssh root@10.20.0.2 fuel node 2>/dev/null | tail -n +3 | cut -d "|" -f 2 | \
-  sed 's/ //g' | grep -v ready | wc -l | grep -q "^0$"
-if [ $? -ne 0 ]; then
-  echo "Deploy failed to verify"
-  ssh root@10.20.0.2 fuel node 2>/dev/null
-  error_exit "Exiting with error status"
-else
-  ssh root@10.20.0.2 fuel node 2>/dev/null
-  echo "Deployment verified"
-fi
-
diff --git a/fuel/deploy/deploy_env.py b/fuel/deploy/deploy_env.py
new file mode 100644 (file)
index 0000000..9bc8fbb
--- /dev/null
@@ -0,0 +1,87 @@
+import os
+import io
+import yaml
+import glob
+
+from ssh_client import SSHClient
+import common
+
+exec_cmd = common.exec_cmd
+err = common.err
+check_file_exists = common.check_file_exists
+log = common.log
+
+CLOUD_DEPLOY_FILE = 'deploy.py'
+
+
+class CloudDeploy(object):
+
+    def __init__(self, dha, fuel_ip, fuel_username, fuel_password, dea_file,
+                 work_dir):
+        self.dha = dha
+        self.fuel_ip = fuel_ip
+        self.fuel_username = fuel_username
+        self.fuel_password = fuel_password
+        self.dea_file = dea_file
+        self.work_dir = work_dir
+        self.file_dir = os.path.dirname(os.path.realpath(__file__))
+        self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
+                             self.fuel_password)
+        self.macs_file = '%s/macs.yaml' % self.file_dir
+        self.node_ids = self.dha.get_node_ids()
+
+    def upload_cloud_deployment_files(self):
+        dest ='~/%s/' % self.work_dir
+
+        with self.ssh as s:
+            s.exec_cmd('rm -rf %s' % self.work_dir, check=False)
+            s.exec_cmd('mkdir ~/%s' % self.work_dir)
+            s.scp_put(self.dea_file, dest)
+            s.scp_put(self.macs_file, dest)
+            s.scp_put('%s/common.py' % self.file_dir, dest)
+            s.scp_put('%s/dea.py' % self.file_dir, dest)
+            for f in glob.glob('%s/cloud/*' % self.file_dir):
+                s.scp_put(f, dest)
+
+    def power_off_nodes(self):
+        for node_id in self.node_ids:
+            self.dha.node_power_off(node_id)
+
+    def power_on_nodes(self):
+        for node_id in self.node_ids:
+            self.dha.node_power_on(node_id)
+
+    def set_boot_order(self, boot_order_list):
+        for node_id in self.node_ids:
+            self.dha.node_set_boot_order(node_id, boot_order_list)
+
+    def get_mac_addresses(self):
+        macs_per_node = {}
+        for node_id in self.node_ids:
+            macs_per_node[node_id] = self.dha.get_node_pxe_mac(node_id)
+        with io.open(self.macs_file, 'w') as stream:
+            yaml.dump(macs_per_node, stream, default_flow_style=False)
+
+    def run_cloud_deploy(self, deploy_app):
+        log('START CLOUD DEPLOYMENT')
+        deploy_app = '%s/%s' % (self.work_dir, deploy_app)
+        dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file))
+        macs_file = '%s/%s' % (self.work_dir, os.path.basename(self.macs_file))
+        with self.ssh:
+            self.ssh.run('python %s %s %s' % (deploy_app, dea_file, macs_file))
+
+    def deploy(self):
+
+        self.power_off_nodes()
+
+        self.set_boot_order(['pxe', 'disk'])
+
+        self.power_on_nodes()
+
+        self.get_mac_addresses()
+
+        check_file_exists(self.macs_file)
+
+        self.upload_cloud_deployment_files()
+
+        self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
diff --git a/fuel/deploy/dha.py b/fuel/deploy/dha.py
new file mode 100644 (file)
index 0000000..bf9a951
--- /dev/null
@@ -0,0 +1,19 @@
+import yaml
+import io
+
+from dha_adapters.libvirt_adapter import LibvirtAdapter
+from dha_adapters.ipmi_adapter import IpmiAdapter
+from dha_adapters.hp_adapter import HpAdapter
+
+class DeploymentHardwareAdapter(object):
+    def __new__(cls, yaml_path):
+        with io.open(yaml_path) as yaml_file:
+            dha_struct = yaml.load(yaml_file)
+        type = dha_struct['adapter']
+
+        if cls is DeploymentHardwareAdapter:
+            if type == 'libvirt': return LibvirtAdapter(yaml_path)
+            if type == 'ipmi': return IpmiAdapter(yaml_path)
+            if type == 'hp': return HpAdapter(yaml_path)
+
+        return super(DeploymentHardwareAdapter, cls).__new__(cls)
diff --git a/fuel/deploy/dha_adapters/__init__.py b/fuel/deploy/dha_adapters/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/fuel/deploy/dha_adapters/hardware_adapter.py b/fuel/deploy/dha_adapters/hardware_adapter.py
new file mode 100644 (file)
index 0000000..884e9ce
--- /dev/null
@@ -0,0 +1,51 @@
+import yaml
+import io
+
+class HardwareAdapter(object):
+    def __init__(self, yaml_path):
+        self.dha_struct = None
+        self.parse_yaml(yaml_path)
+
+    def parse_yaml(self, yaml_path):
+        with io.open(yaml_path) as yaml_file:
+            self.dha_struct = yaml.load(yaml_file)
+
+    def get_adapter_type(self):
+        return self.dha_struct['adapter']
+
+    def get_all_node_ids(self):
+        node_ids = []
+        for node in self.dha_struct['nodes']:
+            node_ids.append(node['id'])
+        node_ids.sort()
+        return node_ids
+
+    def get_fuel_node_id(self):
+        for node in self.dha_struct['nodes']:
+            if 'isFuel' in node and node['isFuel']:
+                return node['id']
+
+    def get_node_ids(self):
+        node_ids = []
+        fuel_node_id = self.get_fuel_node_id()
+        for node in self.dha_struct['nodes']:
+            if node['id'] != fuel_node_id:
+                node_ids.append(node['id'])
+        node_ids.sort()
+        return node_ids
+
+    def use_fuel_custom_install(self):
+        return self.dha_struct['fuelCustomInstall']
+
+    def get_node_property(self, node_id, property_name):
+        for node in self.dha_struct['nodes']:
+            if node['id'] == node_id and property_name in node:
+                return node[property_name]
+
+    def node_can_zero_mbr(self, node_id):
+        return self.get_node_property(node_id, 'nodeCanZeroMBR')
+
+    def get_fuel_access(self):
+        for node in self.dha_struct['nodes']:
+            if 'isFuel' in node and node['isFuel']:
+                return node['username'], node['password']
diff --git a/fuel/deploy/dha_adapters/hp_adapter.py b/fuel/deploy/dha_adapters/hp_adapter.py
new file mode 100644 (file)
index 0000000..8fc38ad
--- /dev/null
@@ -0,0 +1,25 @@
+import common
+from ipmi_adapter import IpmiAdapter
+from ssh_client import SSHClient
+
+log = common.log
+
+DEV = {'pxe': 'bootsource5',
+       'disk': 'bootsource3',
+       'iso': 'bootsource1'}
+
+ROOT = '/system1/bootconfig1'
+
class HpAdapter(IpmiAdapter):
    """DHA adapter for HP servers with iLO.

    Power handling is inherited from IpmiAdapter; only boot-order
    configuration differs, done over SSH against /system1/bootconfig1.
    """

    def __init__(self, yaml_path):
        super(HpAdapter, self).__init__(yaml_path)

    def node_set_boot_order(self, node_id, boot_order_list):
        """Write 1-based iLO boot positions for the given device list."""
        log('Set boot order %s on Node %s' % (boot_order_list, node_id))
        ip, username, password = self.get_access_info(node_id)
        ssh = SSHClient(ip, username, password)
        # iLO boot positions are 1-based; the SSH session is re-entered
        # for every device entry.
        for position, device in enumerate(boot_order_list, 1):
            with ssh as s:
                s.exec_cmd('set %s/%s bootorder=%s'
                           % (ROOT, DEV[device], position))
diff --git a/fuel/deploy/dha_adapters/ipmi_adapter.py b/fuel/deploy/dha_adapters/ipmi_adapter.py
new file mode 100644 (file)
index 0000000..d97fd2d
--- /dev/null
@@ -0,0 +1,61 @@
+import common
+from hardware_adapter import HardwareAdapter
+
+log = common.log
+exec_cmd = common.exec_cmd
+
class IpmiAdapter(HardwareAdapter):
    """DHA adapter for bare-metal nodes managed over IPMI (ipmitool)."""

    def __init__(self, yaml_path):
        super(IpmiAdapter, self).__init__(yaml_path)

    def get_access_info(self, node_id):
        """Return (ip, username, password) for the node's BMC."""
        return (self.get_node_property(node_id, 'ipmiIp'),
                self.get_node_property(node_id, 'ipmiUser'),
                self.get_node_property(node_id, 'ipmiPass'))

    def ipmi_cmd(self, node_id):
        """Build the ipmitool invocation prefix for *node_id*.

        NOTE(review): the password is passed on the command line, so it
        is visible to local users via the process list.
        """
        ip, username, password = self.get_access_info(node_id)
        return ('ipmitool -I lanplus -A password'
                + ' -H %s -U %s -P %s' % (ip, username, password))

    def get_node_pxe_mac(self, node_id):
        """Return the node's PXE MAC address as a one-element list."""
        return [self.get_node_property(node_id, 'pxeMac').lower()]

    def node_power_on(self, node_id):
        """Power the chassis on, but only if it is currently off."""
        log('Power ON Node %s' % node_id)
        prefix = self.ipmi_cmd(node_id)
        state = exec_cmd('%s chassis power status' % prefix)
        if state == 'Chassis Power is off':
            exec_cmd('%s chassis power on' % prefix)

    def node_power_off(self, node_id):
        """Power the chassis off, but only if it is currently on."""
        log('Power OFF Node %s' % node_id)
        prefix = self.ipmi_cmd(node_id)
        state = exec_cmd('%s chassis power status' % prefix)
        if state == 'Chassis Power is on':
            exec_cmd('%s chassis power off' % prefix)

    def node_reset(self, node_id):
        """Hard-reset the chassis if it is powered on."""
        log('Reset Node %s' % node_id)
        prefix = self.ipmi_cmd(node_id)
        state = exec_cmd('%s chassis power status' % prefix)
        if state == 'Chassis Power is on':
            exec_cmd('%s chassis power reset' % prefix)

    def node_set_boot_order(self, node_id, boot_order_list):
        """Issue a 'chassis bootdev' request per known device name.

        Unknown device names are silently ignored.
        """
        log('Set boot order %s on Node %s' % (boot_order_list, node_id))
        prefix = self.ipmi_cmd(node_id)
        # ipmitool arguments keyed by symbolic device name.
        bootdev_args = {'pxe': 'pxe options=persistent',
                        'iso': 'cdrom',
                        'disk': 'disk options=persistent'}
        for dev in boot_order_list:
            if dev in bootdev_args:
                exec_cmd('%s chassis bootdev %s' % (prefix, bootdev_args[dev]))
diff --git a/fuel/deploy/dha_adapters/libvirt_adapter.py b/fuel/deploy/dha_adapters/libvirt_adapter.py
new file mode 100644 (file)
index 0000000..dde4946
--- /dev/null
@@ -0,0 +1,127 @@
+import common
+from lxml import etree
+from hardware_adapter import HardwareAdapter
+
+log = common.log
+exec_cmd = common.exec_cmd
+err = common.err
+
+DEV = {'pxe': 'network',
+       'disk': 'hd',
+       'iso': 'cdrom'}
+
class LibvirtAdapter(HardwareAdapter):
    """DHA adapter controlling libvirt VMs via the 'virsh' command line.

    Node ids map to VM names through the 'libvirtName' node property in
    the DHA YAML; every operation shells out through exec_cmd.
    """

    def __init__(self, yaml_path):
        super(LibvirtAdapter, self).__init__(yaml_path)
        # remove_blank_text lets lxml re-indent (pretty_print) the
        # domain XML cleanly after we modify it.
        self.parser = etree.XMLParser(remove_blank_text=True)

    def node_power_off(self, node_id):
        """Force the VM off ('virsh destroy') if it is running."""
        vm_name = self.get_node_property(node_id, 'libvirtName')
        log('Power OFF Node %s' % vm_name)
        state = exec_cmd('virsh domstate %s' % vm_name)
        if state == 'running':
            # Trailing False presumably makes a failure here non-fatal
            # -- confirm against common.exec_cmd.
            exec_cmd('virsh destroy %s' % vm_name, False)

    def node_power_on(self, node_id):
        """Start the VM if it is currently shut off."""
        vm_name = self.get_node_property(node_id, 'libvirtName')
        log('Power ON Node %s' % vm_name)
        state = exec_cmd('virsh domstate %s' % vm_name)
        if state == 'shut off':
            exec_cmd('virsh start %s' % vm_name)

    def node_reset(self, node_id):
        """Hard-reset the VM ('virsh reset'), regardless of state."""
        vm_name = self.get_node_property(node_id, 'libvirtName')
        log('Reset Node %s' % vm_name)
        exec_cmd('virsh reset %s' % vm_name)

    def translate(self, boot_order_list):
        """Map symbolic device names ('pxe'/'disk'/'iso') to libvirt
        <boot dev=...> values; err() aborts on unknown names."""
        translated = []
        for boot_dev in boot_order_list:
            if boot_dev in DEV:
                translated.append(DEV[boot_dev])
            else:
                err('Boot device %s not recognized' % boot_dev)
        return translated

    def node_set_boot_order(self, node_id, boot_order_list):
        """Persist the boot order into the domain XML definition.

        Dumps the domain XML, strips existing <boot>/<bootmenu> elements
        under <os>, appends the requested <boot dev=...> entries plus a
        disabled boot menu, then redefines the domain from a temp file.
        """
        boot_order_list = self.translate(boot_order_list)
        vm_name = self.get_node_property(node_id, 'libvirtName')
        temp_dir = exec_cmd('mktemp -d')
        log('Set boot order %s on Node %s' % (boot_order_list, vm_name))
        resp = exec_cmd('virsh dumpxml %s' % vm_name)
        xml_dump = etree.fromstring(resp, self.parser)
        # 'os' here is a local holding the <os> element list, not the
        # os module (which this file does not import).
        os = xml_dump.xpath('/domain/os')
        for o in os:
            # Drop any pre-existing boot configuration first.
            for bootelem in ['boot', 'bootmenu']:
                boot = o.xpath(bootelem)
                for b in boot:
                    o.remove(b)
            for dev in boot_order_list:
                b = etree.Element('boot')
                b.set('dev', dev)
                o.append(b)
            bmenu = etree.Element('bootmenu')
            bmenu.set('enable', 'no')
            o.append(bmenu)
        tree = etree.ElementTree(xml_dump)
        xml_file = temp_dir + '/%s.xml' % vm_name
        # NOTE(review): lxml's tree.write() emits bytes; writing through
        # a text-mode ('w') handle works on Python 2 but would need 'wb'
        # on Python 3 -- confirm the target interpreter.
        with open(xml_file, 'w') as f:
            tree.write(f, pretty_print=True, xml_declaration=True)
        exec_cmd('virsh define %s' % xml_file)
        exec_cmd('rm -fr %s' % temp_dir)

    def node_zero_mbr(self, node_id):
        """Wipe the VM's disk(s) by recreating each backing file empty.

        The replacement file is allocated at the same size, so the VM
        sees a blank disk of identical capacity.
        """
        vm_name = self.get_node_property(node_id, 'libvirtName')
        resp = exec_cmd('virsh dumpxml %s' % vm_name)
        xml_dump = etree.fromstring(resp)
        disks = xml_dump.xpath('/domain/devices/disk')
        for disk in disks:
            if disk.get('device') == 'disk':
                sources = disk.xpath('source')
                for source in sources:
                    disk_file = source.get('file')
                    # Field 5 of 'ls -l' output is the size in bytes.
                    disk_size = exec_cmd('ls -l %s' % disk_file).split()[4]
                    exec_cmd('rm -f %s' % disk_file)
                    exec_cmd('fallocate -l %s %s' % (disk_size, disk_file))

    def node_eject_iso(self, node_id):
        """Eject any media from the VM's cdrom drive (trailing False
        presumably tolerates failure when no media is loaded)."""
        vm_name = self.get_node_property(node_id, 'libvirtName')
        device = self.get_name_of_device(vm_name, 'cdrom')
        exec_cmd('virsh change-media %s --eject %s' % (vm_name, device), False)

    def node_insert_iso(self, node_id, iso_file):
        """Insert *iso_file* into the VM's cdrom drive."""
        vm_name = self.get_node_property(node_id, 'libvirtName')
        device = self.get_name_of_device(vm_name, 'cdrom')
        exec_cmd('virsh change-media %s --insert %s %s'
                 % (vm_name, device, iso_file))

    def get_disks(self):
        """Return the 'disks' section of the DHA definition."""
        return self.dha_struct['disks']

    def get_node_role(self, node_id):
        """Return the node's 'role' property from the DHA definition."""
        return self.get_node_property(node_id, 'role')

    def get_node_pxe_mac(self, node_id):
        """Return the lower-cased MAC addresses of all VM interfaces."""
        mac_list = []
        vm_name = self.get_node_property(node_id, 'libvirtName')
        resp = exec_cmd('virsh dumpxml %s' % vm_name)
        xml_dump = etree.fromstring(resp)
        interfaces = xml_dump.xpath('/domain/devices/interface')
        for interface in interfaces:
            macs = interface.xpath('mac')
            for mac in macs:
                mac_list.append(mac.get('address').lower())
        return mac_list

    def get_name_of_device(self, vm_name, device_type):
        """Return the target dev name (e.g. 'hdc') of the first disk of
        *device_type* in the domain XML, or None if there is none."""
        resp = exec_cmd('virsh dumpxml %s' % vm_name)
        xml_dump = etree.fromstring(resp)
        disks = xml_dump.xpath('/domain/devices/disk')
        for disk in disks:
            if disk.get('device') == device_type:
                targets = disk.xpath('target')
                for target in targets:
                    device = target.get('dev')
                    if device:
                        return device
diff --git a/fuel/deploy/functions/common.sh b/fuel/deploy/functions/common.sh
deleted file mode 100755 (executable)
index f6cceb4..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Common functions
-
-error_exit () {
-  echo "Error: $@" >&2
-  exit 1
-}
-
-ssh() {
-  SSHPASS="r00tme" sshpass -e ssh -o UserKnownHostsFile=/dev/null \
-    -o StrictHostKeyChecking=no -o ConnectTimeout=15 "$@"
-}
-
-scp() {
-  SSHPASS="r00tme" sshpass -e scp  -o UserKnownHostsFile=/dev/null \
-    -o StrictHostKeyChecking=no -o ConnectTimeout=15 "$@"
-}
-
-noNodesUp () {
-  fuel node | grep True | wc -l
-}
-
-fuel () {
-  ssh root@10.20.0.2 "fuel $@"
-}
-
-# Return MAC id for virsh node
-getNodeId() {
-  virsh dumpxml $1 | grep "mac address"  | head -1 | sed "s/.*'..:..:..:..:\(.*\)'.*/\1/"
-}
-
-# Wait for node with virtid name to come up
-waitForHost() {
-  mac=`getNodeId $1`
-
-  while true
-  do
-    fuel node --node-id $mac 2>/dev/null | grep -q True && break
-    sleep 3
-    echo -n "."
-  done
-  echo -e "\n"
-}
-
-# Currently not used!
-# Wait for node count to increase
-waitForNode() {
-  local cnt
-  local initCnt
-  local expectCnt
-
-  initCnt=`noNodesUp`
-  expectCnt=$[initCnt+1]
-  while true
-  do
-    cnt=`noNodesUp`
-    if [ $cnt -eq $expectCnt ]; then
-      break
-    elif [ $cnt -lt $initCnt ]; then
-      error_exit "Node count decreased while waiting, $initCnt -> $cnt"
-    elif [ $cnt -gt $expectCnt ]; then
-      error_exit "Node count exceeded expect count, $cnt > $expectCnt"
-    fi
-    sleep 3
-    echo -n "."
-  done
-  echo -e "\n"
-}
-
-bootorder_dvdhd() {
-  virsh dumpxml $1 | grep -v "<boot.*>" | \
-  sed "/<\/os>/i\
-    <boot dev='cdrom'/\>\n\
-    <boot dev='hd'/\>\n\
-    <bootmenu enable='no'/\>" > $tmpdir/vm.xml || error_exit "Could not set bootorder"
-  virsh define $tmpdir/vm.xml || error_exit "Could not set bootorder"
-}
-
-bootorder_hddvd() {
-  virsh dumpxml $1 | grep -v "<boot.*>" | \
-  sed "/<\/os>/i\
-    <boot dev='hd'/\>\n\
-    <boot dev='cdrom'/\>\n\
-    <bootmenu enable='no'/\>" > $tmpdir/vm.xml || error_exit "Could not set bootorder"
-  virsh define $tmpdir/vm.xml || error_exit "Could not set bootorder"
-}
-
-addisofile() {
-  virsh dumpxml $1 | grep -v '\.iso' | sed "s/<.*device='cdrom'.*/<disk type='file' device='cdrom'>/" | \
-    sed "/<.*device='cdrom'.*/a       <source file='$2'/>" > $tmpdir/vm.xml \
-      || error_exit "Could not add isofile"
-  virsh define $tmpdir/vm.xml || error_exit "Could not add isofile"
-}
-
-removeisofile() {
-  virsh dumpxml $1 | grep -v '\.iso' | sed "s/<.*device='cdrom'.*/<disk type='block' device='cdrom'>/" \
-     > $tmpdir/vm.xml \
-      || error_exit "Could not remove isofile"
-  virsh define $tmpdir/vm.xml || error_exit "Could not remove isofile"
-}
diff --git a/fuel/deploy/functions/install_iso.sh b/fuel/deploy/functions/install_iso.sh
deleted file mode 100755 (executable)
index 0a92cd5..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# Recreate disk - needed for the reboot to work
-fueldisk=`virsh dumpxml fuel-master | \
-  grep fuel-master.raw | sed "s/.*'\(.*\)'.*/\1/"`
-disksize=`ls -l $fueldisk | awk '{ print $5 }'`
-rm -f $fueldisk
-fallocate -l $disksize $fueldisk
-
-bootorder_hddvd fuel-master
-sleep 3
-addisofile fuel-master $isofile
-sleep 3
-virsh start fuel-master
-
-# wait for node up
-echo "Waiting for Fuel master to accept SSH"
-while true
-do
-  ssh root@10.20.0.2 date 2>/dev/null
-  if [ $? -eq 0 ]; then
-    break
-  fi
-  sleep 10
-done
-
-# Wait until fuelmenu is up
-echo "Waiting for fuelmenu to come up"
-menuPid=""
-while [ -z "$menuPid" ]
-do
-  menuPid=`ssh root@10.20.0.2 "ps -ef" 2>&1 | grep fuelmenu | grep -v grep | awk '{ print $2 }'`
-  sleep 10
-done
-
-# This is where we would inject our own astute.yaml
-
-echo "Found menu as PID $menuPid, now killing it"
-ssh root@10.20.0.2 "kill $menuPid" 2>/dev/null
-
-# Wait until installation complete
-echo "Waiting for bootstrap of Fuel node to complete"
-while true
-do
-  ssh root@10.20.0.2 "ps -ef" 2>/dev/null \
-    | grep -q /usr/local/sbin/bootstrap_admin_node
-  if [ $? -ne 0 ]; then
-    break
-  fi
-  sleep 10
-done
-
-echo "Waiting two minutes for Fuel to stabilize"
-sleep 2m
diff --git a/fuel/deploy/functions/isolinux.cfg.patch b/fuel/deploy/functions/isolinux.cfg.patch
deleted file mode 100644 (file)
index 298a057..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-*** isolinux/isolinux.cfg.orig 2015-04-15 08:29:52.026868322 -0400
---- isolinux/isolinux.cfg      2015-04-15 08:30:34.350868343 -0400
-***************
-*** 19,22 ****
-    menu label Fuel Install (^Static IP)
-    menu default
-    kernel vmlinuz
-!   append initrd=initrd.img biosdevname=0 ks=cdrom:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld showmenu=no
---- 19,22 ----
-    menu label Fuel Install (^Static IP)
-    menu default
-    kernel vmlinuz
-!   append initrd=initrd.img biosdevname=0 ks=cdrom:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld showmenu=yes
-
diff --git a/fuel/deploy/functions/ks.cfg.patch b/fuel/deploy/functions/ks.cfg.patch
deleted file mode 100644 (file)
index 1896957..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-*** ks.cfg.orig        Wed Apr 15 21:47:09 2015
---- ks.cfg     Wed Apr 15 21:47:24 2015
-***************
-*** 35,41 ****
-  default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
-  
-  installdrive="undefined"
-! forceformat="no"
-  for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
-  
-  set ${drives} ${removable_drives}
---- 35,41 ----
-  default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'`
-  
-  installdrive="undefined"
-! forceformat="yes"
-  for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done
-  
-  set ${drives} ${removable_drives}
diff --git a/fuel/deploy/functions/patch-iso.sh b/fuel/deploy/functions/patch-iso.sh
deleted file mode 100755 (executable)
index 782737e..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-# This is a temporary script - this should be rolled into a separate
-# build target "make ci-iso" instead!
-
-exit_handler() {
-  rm -Rf $tmpnewdir
-  fusermount -u $tmporigdir 2>/dev/null
-  test -d $tmporigdir && mdir $tmporigdir
-}
-
-trap exit_handler exit
-
-error_exit() {
-  echo "$@"
-  exit 1
-}
-
-
-top=$(cd `dirname $0`; pwd)
-origiso=$(cd `dirname $1`; echo `pwd`/`basename $1`)
-newiso=$(cd `dirname $2`; echo `pwd`/`basename $2`)
-tmpdir=$3
-tmporigdir=/${tmpdir}/origiso
-tmpnewdir=/${tmpdir}/newiso
-
-test -f $origiso || error_exit "Could not find origiso $origiso"
-test -d $tmpdir || error_exit "Could not find tmpdir $tmpdir"
-
-
-if [ "`whoami`" != "root" ]; then
-  error_exit "You need be root to run this script"
-fi
-
-echo "Copying..."
-rm -Rf $tmporigdir $tmpnewdir
-mkdir -p $tmporigdir $tmpnewdir
-fuseiso $origiso $tmporigdir || error_exit "Failed fuseiso"
-cd $tmporigdir
-find . | cpio -pd $tmpnewdir
-cd $tmpnewdir
-fusermount -u $tmporigdir
-rmdir $tmporigdir
-chmod -R 755 $tmpnewdir
-
-echo "Patching..."
-cd $tmpnewdir
-# Patch ISO to make it suitable for automatic deployment
-cat $top/ks.cfg.patch | patch -p0 || error_exit "Failed patch 1"
-cat $top/isolinux.cfg.patch | patch -p0 || error_exit "Failed patch 2"
-rm -rf .rr_moved
-
-echo "Creating iso $newiso"
-mkisofs -quiet -r  \
-  -J -R -b isolinux/isolinux.bin \
-  -no-emul-boot \
-  -boot-load-size 4 -boot-info-table \
-  --hide-rr-moved \
-  -x "lost+found" -o $newiso . || error_exit "Failed making iso"
-
similarity index 90%
rename from fuel/deploy/setup_vms/setup-vm-host.sh
rename to fuel/deploy/install-ubuntu-packages.sh
index fd469e6..1ebd7c0 100755 (executable)
@@ -13,5 +13,6 @@
 #
 apt-get install -y libvirt-bin qemu-kvm tightvncserver virt-manager \
    sshpass fuseiso genisoimage blackbox xterm python-yaml python-netaddr \
-   python-paramiko python-lxml
-restart libvirt-bin
+   python-paramiko python-lxml python-pip
+pip install scp
+restart libvirt-bin
\ No newline at end of file
diff --git a/fuel/deploy/install_fuel_master.py b/fuel/deploy/install_fuel_master.py
new file mode 100644 (file)
index 0000000..bb8e7e1
--- /dev/null
@@ -0,0 +1,177 @@
+import common
+import time
+import os
+from ssh_client import SSHClient
+from dha_adapters.libvirt_adapter import LibvirtAdapter
+
+log = common.log
+err = common.err
+clean = common.clean
+
+TRANSPLANT_FUEL_SETTINGS = 'transplant_fuel_settings.py'
+BOOTSTRAP_ADMIN = '/usr/local/sbin/bootstrap_admin_node'
+
class InstallFuelMaster(object):
    """Orchestrates installation of the Fuel master onto a libvirt VM.

    Drives the VM through the DHA libvirt adapter: boots the Fuel ISO,
    waits for SSH, replaces astute.yaml with our own settings, then
    lets the Fuel bootstrap finish.
    """

    def __init__(self, dea_file, dha_file, fuel_ip, fuel_username, fuel_password,
                 fuel_node_id, iso_file, work_dir):
        self.dea_file = dea_file
        self.dha = LibvirtAdapter(dha_file)
        self.fuel_ip = fuel_ip
        self.fuel_username = fuel_username
        self.fuel_password = fuel_password
        self.fuel_node_id = fuel_node_id
        self.iso_file = iso_file
        # Scratch directory name, reused as ~/<work_dir> on the Fuel VM.
        self.work_dir = work_dir
        # Directory containing this script; source of helper files to scp.
        self.file_dir = os.path.dirname(os.path.realpath(__file__))
        self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
                             self.fuel_password)

    def install(self):
        """Standard path: zero MBR or reorder boot live, then install."""
        log('Start Fuel Installation')

        self.dha.node_power_off(self.fuel_node_id)

        self.zero_mbr_set_boot_order()

        self.proceed_with_installation()

    def custom_install(self):
        """Alternative path: always zero the MBR and boot disk-first."""
        log('Start Custom Fuel Installation')

        self.dha.node_power_off(self.fuel_node_id)

        log('Zero the MBR')
        self.dha.node_zero_mbr(self.fuel_node_id)

        # Disk first, ISO second -- presumably so the blank disk falls
        # through to the ISO now, and later reboots come up from the
        # installed disk. TODO confirm.
        self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])

        self.proceed_with_installation()

    def proceed_with_installation(self):
        """Boot the ISO, patch the installer settings, await completion."""
        log('Eject ISO')
        self.dha.node_eject_iso(self.fuel_node_id)

        log('Insert ISO %s' % self.iso_file)
        self.dha.node_insert_iso(self.fuel_node_id, self.iso_file)

        self.dha.node_power_on(self.fuel_node_id)

        log('Waiting for Fuel master to accept SSH')
        self.wait_for_node_up()

        log('Wait until Fuel menu is up')
        fuel_menu_pid = self.wait_until_fuel_menu_up()

        log('Inject our own astute.yaml settings')
        self.inject_own_astute_yaml()

        log('Let the Fuel deployment continue')
        log('Found FUEL menu as PID %s, now killing it' % fuel_menu_pid)
        # Killing fuelmenu makes the installer continue with the
        # already-patched settings instead of waiting for user input.
        self.ssh_exec_cmd('kill %s' % fuel_menu_pid)

        log('Wait until installation complete')
        self.wait_until_installation_completed()

        log('Waiting for one minute for Fuel to stabilize')
        time.sleep(60)

        log('Eject ISO')
        self.dha.node_eject_iso(self.fuel_node_id)

        log('Fuel Master installed successfully !')

    def zero_mbr_set_boot_order(self):
        """Prepare boot media: zero MBR if allowed, else reorder live.

        Aborts via err() when neither option is available.
        NOTE(review): node_can_set_boot_order_live is not defined in any
        adapter visible here -- verify it exists before relying on the
        elif branch.
        """
        if self.dha.node_can_zero_mbr(self.fuel_node_id):
            log('Fuel Node %s capable of zeroing MBR so doing that...'
                % self.fuel_node_id)
            self.dha.node_zero_mbr(self.fuel_node_id)
            self.dha.node_set_boot_order(self.fuel_node_id, ['disk', 'iso'])
        elif self.dha.node_can_set_boot_order_live(self.fuel_node_id):
            log('Node %s can change ISO boot order live' % self.fuel_node_id)
            self.dha.node_set_boot_order(self.fuel_node_id, ['iso', 'disk'])
        else:
            err('No way to install Fuel node')

    def wait_for_node_up(self):
        """Poll SSH until the Fuel VM accepts a connection, or err()."""
        WAIT_LOOP = 60
        SLEEP_TIME = 10
        success = False
        for i in range(WAIT_LOOP):
            try:
                self.ssh.open()
                success = True
                break
            except Exception as e:
                log('EXCEPTION [%s] received when SSH-ing into Fuel VM %s ... '
                    'sleeping %s seconds' % (e, self.fuel_ip, SLEEP_TIME))
                time.sleep(SLEEP_TIME)
            finally:
                # The probe connection is closed even on success; later
                # steps re-open the session via 'with self.ssh'.
                self.ssh.close()

        if not success:
           err('Could not SSH into Fuel VM %s' % self.fuel_ip)

    def wait_until_fuel_menu_up(self):
        """Poll 'ps -ef' until a fuelmenu process appears; return its PID.

        Aborts via err() if the menu never shows up.
        """
        WAIT_LOOP = 60
        SLEEP_TIME = 10
        CMD = 'ps -ef'
        SEARCH = 'fuelmenu'
        fuel_menu_pid = None
        with self.ssh:
            for i in range(WAIT_LOOP):
                ret = self.ssh.exec_cmd(CMD)
                fuel_menu_pid = self.get_fuel_menu_pid(ret, SEARCH)
                if not fuel_menu_pid:
                    time.sleep(SLEEP_TIME)
                else:
                    break
        if not fuel_menu_pid:
            err('Could not find the Fuel Menu Process ID')
        return fuel_menu_pid

    def get_fuel_menu_pid(self, printout, search):
        """Return the PID from the first line of *printout* containing
        *search*, or None if no line matches."""
        fuel_menu_pid = None
        for line in printout.splitlines():
            if search in line:
                # clean() presumably splits the line into fields; field
                # index 1 of 'ps -ef' output is the PID -- confirm
                # against common.clean.
                fuel_menu_pid = clean(line)[1]
                break
        return fuel_menu_pid

    def ssh_exec_cmd(self, cmd):
        """Run *cmd* on the Fuel VM in a fresh SSH session; return output."""
        with self.ssh:
            ret = self.ssh.exec_cmd(cmd)
        return ret

    def inject_own_astute_yaml(self):
        """Copy the DEA file plus helper scripts to the Fuel VM and run
        the transplant script there to rewrite astute.yaml."""
        dest ='~/%s/' % self.work_dir

        with self.ssh as s:
            # Recreate a clean work dir on the VM; check=False tolerates
            # the first run where nothing exists yet.
            # NOTE(review): 'rm -rf' targets self.work_dir without the
            # '~/' prefix used by mkdir -- confirm both resolve to the
            # same directory (i.e. that the session cwd is $HOME).
            s.exec_cmd('rm -rf %s' % self.work_dir, check=False)
            s.exec_cmd('mkdir ~/%s' % self.work_dir)
            s.scp_put(self.dea_file, dest)
            s.scp_put('%s/common.py' % self.file_dir, dest)
            s.scp_put('%s/dea.py' % self.file_dir, dest)
            s.scp_put('%s/transplant_fuel_settings.py' % self.file_dir, dest)
            log('Modifying Fuel astute')
            s.run('python ~/%s/%s ~/%s/%s'
                  % (self.work_dir, TRANSPLANT_FUEL_SETTINGS,
                     self.work_dir, os.path.basename(self.dea_file)))

    def wait_until_installation_completed(self):
        """Poll until the bootstrap_admin_node process disappears.

        Aborts via err() if it is still running after WAIT_LOOP polls.
        """
        WAIT_LOOP = 180
        SLEEP_TIME = 10
        CMD = 'ps -ef | grep %s | grep -v grep' % BOOTSTRAP_ADMIN

        install_completed = False
        with self.ssh:
            for i in range(WAIT_LOOP):
                ret = self.ssh.exec_cmd(CMD)
                # Empty output means the bootstrap process has exited.
                if not ret:
                    install_completed = True
                    break
                else:
                    time.sleep(SLEEP_TIME)

        if not install_completed:
            err('Fuel installation did not complete')
similarity index 87%
rename from fuel/deploy/dea.yaml
rename to fuel/deploy/libvirt/dea.yaml
index b83ddea..802293f 100644 (file)
----
-name: ENV-1
-shelf:
- - id: 1
-   type: libvirt
-   mgmt_ip: 10.20.0.1
-   username: user
-   password: systemabc
-   blade:
-    - id: 1
-      roles:
-       - controller
-    - id: 2
-      roles:
-       - controller
-    - id: 3
-      roles:
-       - controller
-    - id: 4
-    - id: 5
-    - id: 6
-networks:
-  management_vip: 192.168.0.2
+title: Deployment Environment Adapter (DEA)
+# DEA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+environment_name: opnfv59-b
+environment_mode: multinode
+wanted_release: Juno on Ubuntu 12.04.4
+nodes:
+- id: 1
+  interfaces: interface1
+  transformations: controller1
+  role: controller
+- id: 2
+  interfaces: interface1
+  transformations: controller1
+  role: controller
+- id: 3
+  interfaces: interface1
+  transformations: controller1
+  role: controller
+- id: 4
+  interfaces: interface1
+  transformations: compute1
+  role: compute
+- id: 5
+  interfaces: interface1
+  transformations: compute1
+  role: compute
+- id: 6
+  interfaces: interface1
+  transformations: compute1
+  role: compute
+fuel:
+  ADMIN_NETWORK:
+    ipaddress: 10.20.0.2
+    netmask: 255.255.255.0
+    dhcp_pool_start: 10.20.0.3
+    dhcp_pool_end: 10.20.0.254
+  DNS_UPSTREAM: 8.8.8.8
+  DNS_DOMAIN: domain.tld
+  DNS_SEARCH: domain.tld
+  FUEL_ACCESS:
+    user: admin
+    password: admin
+  HOSTNAME: opnfv59
+  NTP1: 0.pool.ntp.org
+  NTP2: 1.pool.ntp.org
+  NTP3: 2.pool.ntp.org
+interfaces:
+  interface1:
+    eth0:
+    - fuelweb_admin
+    - management
+    eth1:
+    - storage
+    eth2:
+    - private
+    eth3:
+    - public
+transformations:
+  controller1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-ex
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 102
+      - 0
+      vlan_ids:
+      - 102
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-mgmt
+      tags:
+      - 101
+      - 0
+      vlan_ids:
+      - 101
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth3
+      - br-ex
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+  compute1:
+    - action: add-br
+      name: br-eth0
+    - action: add-port
+      bridge: br-eth0
+      name: eth0
+    - action: add-br
+      name: br-eth1
+    - action: add-port
+      bridge: br-eth1
+      name: eth1
+    - action: add-br
+      name: br-eth2
+    - action: add-port
+      bridge: br-eth2
+      name: eth2
+    - action: add-br
+      name: br-eth3
+    - action: add-port
+      bridge: br-eth3
+      name: eth3
+    - action: add-br
+      name: br-mgmt
+    - action: add-br
+      name: br-storage
+    - action: add-br
+      name: br-fw-admin
+    - action: add-patch
+      bridges:
+      - br-eth1
+      - br-storage
+      tags:
+      - 102
+      - 0
+      vlan_ids:
+      - 102
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-mgmt
+      tags:
+      - 101
+      - 0
+      vlan_ids:
+      - 101
+      - 0
+    - action: add-patch
+      bridges:
+      - br-eth0
+      - br-fw-admin
+      trunks:
+      - 0
+    - action: add-br
+      name: br-prv
+    - action: add-patch
+      bridges:
+      - br-eth2
+      - br-prv
+opnfv:
+  compute: {}
+  controller: {}
+network:
   networking_parameters:
     base_mac: fa:16:3e:00:00:00
     dns_nameservers:
@@ -38,7 +206,7 @@ networks:
     segmentation_type: vlan
     vlan_range:
     - 1000
-    - 1200
+    - 1030
   networks:
   - cidr: 172.16.0.0/24
     gateway: 172.16.0.1
@@ -62,27 +230,10 @@ networks:
       vlan_start: null
     name: public
     vlan_start: null
-  - cidr: null
-    gateway: null
-    ip_ranges: []
-    meta:
-      assign_vip: false
-      configurable: false
-      map_priority: 2
-      name: private
-      neutron_vlan_range: true
-      notation: null
-      render_addr_mask: null
-      render_type: null
-      seg_type: vlan
-      use_gateway: false
-      vlan_start: null
-    name: private
-    vlan_start: null
   - cidr: 192.168.0.0/24
     gateway: null
     ip_ranges:
-    - - 192.168.0.2
+    - - 192.168.0.1
       - 192.168.0.254
     meta:
       assign_vip: true
@@ -100,7 +251,7 @@ networks:
   - cidr: 192.168.1.0/24
     gateway: null
     ip_ranges:
-    - - 192.168.1.2
+    - - 192.168.1.1
       - 192.168.1.254
     meta:
       assign_vip: false
@@ -115,6 +266,23 @@ networks:
       vlan_start: 102
     name: storage
     vlan_start: 102
+  - cidr: null
+    gateway: null
+    ip_ranges: []
+    meta:
+      assign_vip: false
+      configurable: false
+      map_priority: 2
+      name: private
+      neutron_vlan_range: true
+      notation: null
+      render_addr_mask: null
+      render_type: null
+      seg_type: vlan
+      use_gateway: false
+      vlan_start: null
+    name: private
+    vlan_start: null
   - cidr: 10.20.0.0/24
     gateway: null
     ip_ranges:
@@ -131,143 +299,6 @@ networks:
       use_gateway: true
     name: fuelweb_admin
     vlan_start: null
-  public_vip: 172.16.0.2
-controller:
-- action: add-br
-  name: br-eth0
-- action: add-port
-  bridge: br-eth0
-  name: eth0
-- action: add-br
-  name: br-eth1
-- action: add-port
-  bridge: br-eth1
-  name: eth1
-- action: add-br
-  name: br-eth2
-- action: add-port
-  bridge: br-eth2
-  name: eth2
-- action: add-br
-  name: br-eth3
-- action: add-port
-  bridge: br-eth3
-  name: eth3
-- action: add-br
-  name: br-ex
-- action: add-br
-  name: br-mgmt
-- action: add-br
-  name: br-storage
-- action: add-br
-  name: br-fw-admin
-- action: add-patch
-  bridges:
-  - br-eth1
-  - br-storage
-  tags:
-  - 102
-  - 0
-  vlan_ids:
-  - 102
-  - 0
-- action: add-patch
-  bridges:
-  - br-eth0
-  - br-mgmt
-  tags:
-  - 101
-  - 0
-  vlan_ids:
-  - 101
-  - 0
-- action: add-patch
-  bridges:
-  - br-eth0
-  - br-fw-admin
-  trunks:
-  - 0
-- action: add-patch
-  bridges:
-  - br-eth3
-  - br-ex
-  trunks:
-  - 0
-- action: add-br
-  name: br-prv
-- action: add-patch
-  bridges:
-  - br-eth2
-  - br-prv
-compute:
-- action: add-br
-  name: br-eth0
-- action: add-port
-  bridge: br-eth0
-  name: eth0
-- action: add-br
-  name: br-eth1
-- action: add-port
-  bridge: br-eth1
-  name: eth1
-- action: add-br
-  name: br-eth2
-- action: add-port
-  bridge: br-eth2
-  name: eth2
-- action: add-br
-  name: br-eth3
-- action: add-port
-  bridge: br-eth3
-  name: eth3
-- action: add-br
-  name: br-mgmt
-- action: add-br
-  name: br-storage
-- action: add-br
-  name: br-fw-admin
-- action: add-patch
-  bridges:
-  - br-eth1
-  - br-storage
-  tags:
-  - 102
-  - 0
-  vlan_ids:
-  - 102
-  - 0
-- action: add-patch
-  bridges:
-  - br-eth0
-  - br-mgmt
-  tags:
-  - 101
-  - 0
-  vlan_ids:
-  - 101
-  - 0
-- action: add-patch
-  bridges:
-  - br-eth0
-  - br-fw-admin
-  trunks:
-  - 0
-- action: add-br
-  name: br-prv
-- action: add-patch
-  bridges:
-  - br-eth2
-  - br-prv
-interfaces:
-  eth0:
-  - fuelweb_admin
-  - management
-  eth1:
-  - storage
-  eth2:
-  - private
-  eth3:
-  - public
 settings:
   editable:
     access:
@@ -309,7 +340,7 @@ settings:
         description: If selected, Ceilometer component will be installed
         label: Install Ceilometer
         type: checkbox
-        value: true
+        value: false
         weight: 40
       heat:
         description: ''
@@ -424,7 +455,7 @@ settings:
           will not be attempted.
         label: Resume guests state on host boot
         type: checkbox
-        value: false
+        value: true
         weight: 60
       use_cow_images:
         description: For most cases you will want qcow format. If it's disabled, raw
@@ -810,7 +841,7 @@ settings:
         restrictions:
         - settings:storage.volumes_ceph.value == true
         type: checkbox
-        value: false
+        value: true
         weight: 10
       volumes_vmdk:
         description: Configures Cinder to store volumes via VMware vCenter.
@@ -902,7 +933,7 @@ settings:
           error: Empty password
           source: \S
         type: password
-        value: ''
+        value: admin
         weight: 30
       vc_user:
         description: vCenter admin username
@@ -911,7 +942,7 @@ settings:
           error: Empty username
           source: \S
         type: text
-        value: ''
+        value: admin
         weight: 20
       vlan_interface:
         description: Physical ESXi host ethernet adapter for VLAN networking (e.g.
@@ -943,5 +974,3 @@ settings:
         type: text
         value: admin
         weight: 10
-...
-
diff --git a/fuel/deploy/libvirt/dha.yaml b/fuel/deploy/libvirt/dha.yaml
new file mode 100644 (file)
index 0000000..ce61e53
--- /dev/null
@@ -0,0 +1,80 @@
+title: Deployment Hardware Adapter (DHA)
+# DHA API version supported
+version: 1.1
+created: Sat Apr 25 16:26:22 UTC 2015
+comment: Small libvirt setup
+
+# Adapter to use for this definition
+adapter: libvirt
+
+# Node list.
+# Mandatory fields are id and role.
+# The MAC address of the PXE boot interface does not have to be
+#   set, but the field must be present.
+# All other fields are adapter specific.
+
+nodes:
+- id: 1
+  pxeMac: 52:54:00:aa:dd:84
+  libvirtName: controller1
+  libvirtTemplate: controller
+  role: controller
+- id: 2
+  pxeMac: 52:54:00:aa:dd:84  # FIXME: duplicates node 1's MAC; PXE MACs must be unique per node for role matching
+  libvirtName: controller2
+  libvirtTemplate: controller
+  role: controller
+- id: 3
+  pxeMac: 52:54:00:aa:dd:84  # FIXME: duplicates node 1's MAC; PXE MACs must be unique per node for role matching
+  libvirtName: controller3
+  libvirtTemplate: controller
+  role: controller
+- id: 4
+  pxeMac: 52:54:00:41:64:f3
+  libvirtName: compute1
+  libvirtTemplate: compute
+  role: compute
+- id: 5
+  pxeMac: 52:54:00:69:a0:79
+  libvirtName: compute2
+  libvirtTemplate: compute
+  role: compute
+- id: 6
+  pxeMac: 52:54:00:69:a0:79  # FIXME: duplicates node 5's MAC; PXE MACs must be unique per node for role matching
+  libvirtName: compute3
+  libvirtTemplate: compute
+  role: compute
+- id: 7
+  pxeMac: 52:54:00:f8:b0:75
+  libvirtName: fuel-master
+  libvirtTemplate: fuel-master
+  isFuel: yes
+  nodeCanZeroMBR: yes
+  nodeCanSetBootOrderLive: yes
+  username: root
+  password: r00tme
+
+disks:
+  fuel: 30G
+  controller: 30G
+  compute: 30G
+
+# Deployment power on strategy
+# all:      Turn on all nodes at once. There will be no correlation
+#           between the DHA and DEA node numbering. MAC addresses
+#           will be used to select the node roles though.
+# sequence: Turn on the nodes in sequence starting with the lowest order
+#           node and wait for the node to be detected by Fuel. Not until
+#           the node has been detected and assigned a role will the next
+#           node be turned on.
+powerOnStrategy: all
+
+# If fuelCustomInstall is set to true, Fuel is assumed to be installed by
+# calling the DHA adapter function "dha_fuelCustomInstall()" with two
+# arguments: node ID and the ISO file name to deploy. The custom install
+# function is then to handle all necessary logic to boot the Fuel master
+# from the ISO and then return.
+# Allowed values: true, false
+
+fuelCustomInstall: false
+
similarity index 78%
rename from fuel/deploy/libvirt/vms/s1_b4
rename to fuel/deploy/libvirt/vms/compute
index 97384ba..7591509 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>s1_b4</name>
+  <name>compute4</name>
   <memory unit='KiB'>8388608</memory>
   <currentMemory unit='KiB'>8388608</currentMemory>
   <vcpu placement='static'>2</vcpu>
       <driver name='qemu' type='raw'/>
       <source file='disk.raw'/>
       <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
     </disk>
     <controller type='usb' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
     </controller>
     <controller type='pci' index='0' model='pci-root'/>
     <interface type='network'>
       <source network='fuel1'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
     </interface>
     <interface type='network'>
       <source network='fuel2'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
     </interface>
     <interface type='network'>
       <source network='fuel3'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
     </interface>
     <interface type='network'>
       <source network='fuel4'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
     </interface>
     <serial type='pty'>
       <target port='0'/>
     <input type='keyboard' bus='ps2'/>
     <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
     <sound model='ich6'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
     </sound>
     <video>
       <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
     </video>
     <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
     </memballoon>
   </devices>
 </domain>
-
similarity index 78%
rename from fuel/deploy/libvirt/vms/s1_b1
rename to fuel/deploy/libvirt/vms/controller
index a879163..a871262 100644 (file)
@@ -1,5 +1,5 @@
 <domain type='kvm'>
-  <name>s1_b1</name>
+  <name>controller1</name>
   <memory unit='KiB'>2097152</memory>
   <currentMemory unit='KiB'>2097152</currentMemory>
   <vcpu placement='static'>2</vcpu>
       <driver name='qemu' type='raw'/>
       <source file='disk.raw'/>
       <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
     </disk>
     <controller type='usb' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
     </controller>
     <controller type='pci' index='0' model='pci-root'/>
     <interface type='network'>
       <source network='fuel1'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
     </interface>
     <interface type='network'>
       <source network='fuel2'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
     </interface>
     <interface type='network'>
       <source network='fuel3'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
     </interface>
     <interface type='network'>
       <source network='fuel4'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
     </interface>
     <serial type='pty'>
       <target port='0'/>
     <input type='keyboard' bus='ps2'/>
     <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
     <sound model='ich6'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
     </sound>
     <video>
       <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
     </video>
     <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
     </memballoon>
   </devices>
 </domain>
-
index 1b2d86f..f4e652b 100644 (file)
@@ -3,11 +3,14 @@
   <memory unit='KiB'>2097152</memory>
   <currentMemory unit='KiB'>2097152</currentMemory>
   <vcpu placement='static'>2</vcpu>
+  <resource>
+    <partition>/machine</partition>
+  </resource>
   <os>
     <type arch='x86_64' machine='pc-1.0'>hvm</type>
     <boot dev='hd'/>
     <boot dev='cdrom'/>
-    <bootmenu enable='yes'/>
+    <bootmenu enable='no'/>
   </os>
   <features>
     <acpi/>
   <on_crash>restart</on_crash>
   <devices>
     <emulator>/usr/bin/kvm</emulator>
-    <disk type='file' device='cdrom'>
+    <disk type='block' device='cdrom'>
       <driver name='qemu' type='raw'/>
       <target dev='hdc' bus='ide'/>
       <readonly/>
-      <address type='drive' controller='0' bus='1' target='0' unit='0'/>
     </disk>
     <disk type='file' device='disk'>
       <driver name='qemu' type='raw'/>
       <source file='disk.raw'/>
       <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
     </disk>
     <controller type='ide' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
     </controller>
     <controller type='usb' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
     </controller>
     <controller type='pci' index='0' model='pci-root'/>
     <interface type='network'>
       <source network='fuel1'/>
       <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
     </interface>
     <serial type='pty'>
       <target port='0'/>
     </console>
     <input type='mouse' bus='ps2'/>
     <input type='keyboard' bus='ps2'/>
-    <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
+    <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='sv'>
+      <listen type='address' address='127.0.0.1'/>
+    </graphics>
     <sound model='ich6'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
     </sound>
     <video>
       <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
     </video>
     <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
     </memballoon>
   </devices>
+  <seclabel type='dynamic' model='apparmor' relabel='yes'/>
 </domain>
 
diff --git a/fuel/deploy/libvirt/vms/s1_b2 b/fuel/deploy/libvirt/vms/s1_b2
deleted file mode 100644 (file)
index 27eebcf..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-<domain type='kvm'>
-  <name>s1_b2</name>
-  <memory unit='KiB'>2097152</memory>
-  <currentMemory unit='KiB'>2097152</currentMemory>
-  <vcpu placement='static'>2</vcpu>
-  <os>
-    <type arch='x86_64' machine='pc-1.0'>hvm</type>
-    <boot dev='network'/>
-    <boot dev='hd'/>
-  </os>
-  <features>
-    <acpi/>
-    <apic/>
-    <pae/>
-  </features>
-  <cpu mode='custom' match='exact'>
-    <model fallback='allow'>SandyBridge</model>
-    <vendor>Intel</vendor>
-    <feature policy='require' name='vme'/>
-    <feature policy='require' name='dtes64'/>
-    <feature policy='require' name='vmx'/>
-    <feature policy='require' name='erms'/>
-    <feature policy='require' name='xtpr'/>
-    <feature policy='require' name='smep'/>
-    <feature policy='require' name='pcid'/>
-    <feature policy='require' name='est'/>
-    <feature policy='require' name='monitor'/>
-    <feature policy='require' name='smx'/>
-    <feature policy='require' name='tm'/>
-    <feature policy='require' name='acpi'/>
-    <feature policy='require' name='osxsave'/>
-    <feature policy='require' name='ht'/>
-    <feature policy='require' name='pdcm'/>
-    <feature policy='require' name='fsgsbase'/>
-    <feature policy='require' name='f16c'/>
-    <feature policy='require' name='ds'/>
-    <feature policy='require' name='tm2'/>
-    <feature policy='require' name='ss'/>
-    <feature policy='require' name='pbe'/>
-    <feature policy='require' name='ds_cpl'/>
-    <feature policy='require' name='rdrand'/>
-  </cpu>
-  <clock offset='utc'/>
-  <on_poweroff>destroy</on_poweroff>
-  <on_reboot>restart</on_reboot>
-  <on_crash>restart</on_crash>
-  <devices>
-    <emulator>/usr/bin/kvm</emulator>
-    <disk type='file' device='disk'>
-      <driver name='qemu' type='raw'/>
-      <source file='disk.raw'/>
-      <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
-    </disk>
-    <controller type='usb' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
-    </controller>
-    <controller type='pci' index='0' model='pci-root'/>
-    <interface type='network'>
-      <source network='fuel1'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel2'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel3'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel4'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
-    </interface>
-    <serial type='pty'>
-      <target port='0'/>
-    </serial>
-    <console type='pty'>
-      <target type='serial' port='0'/>
-    </console>
-    <input type='mouse' bus='ps2'/>
-    <input type='keyboard' bus='ps2'/>
-    <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
-    <sound model='ich6'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
-    </sound>
-    <video>
-      <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
-    </video>
-    <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
-    </memballoon>
-  </devices>
-</domain>
-
diff --git a/fuel/deploy/libvirt/vms/s1_b3 b/fuel/deploy/libvirt/vms/s1_b3
deleted file mode 100644 (file)
index 37a4d2f..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-<domain type='kvm'>
-  <name>s1_b3</name>
-  <memory unit='KiB'>2097152</memory>
-  <currentMemory unit='KiB'>2097152</currentMemory>
-  <vcpu placement='static'>2</vcpu>
-  <os>
-    <type arch='x86_64' machine='pc-1.0'>hvm</type>
-    <boot dev='network'/>
-    <boot dev='hd'/>
-  </os>
-  <features>
-    <acpi/>
-    <apic/>
-    <pae/>
-  </features>
-  <cpu mode='custom' match='exact'>
-    <model fallback='allow'>SandyBridge</model>
-    <vendor>Intel</vendor>
-    <feature policy='require' name='vme'/>
-    <feature policy='require' name='dtes64'/>
-    <feature policy='require' name='vmx'/>
-    <feature policy='require' name='erms'/>
-    <feature policy='require' name='xtpr'/>
-    <feature policy='require' name='smep'/>
-    <feature policy='require' name='pcid'/>
-    <feature policy='require' name='est'/>
-    <feature policy='require' name='monitor'/>
-    <feature policy='require' name='smx'/>
-    <feature policy='require' name='tm'/>
-    <feature policy='require' name='acpi'/>
-    <feature policy='require' name='osxsave'/>
-    <feature policy='require' name='ht'/>
-    <feature policy='require' name='pdcm'/>
-    <feature policy='require' name='fsgsbase'/>
-    <feature policy='require' name='f16c'/>
-    <feature policy='require' name='ds'/>
-    <feature policy='require' name='tm2'/>
-    <feature policy='require' name='ss'/>
-    <feature policy='require' name='pbe'/>
-    <feature policy='require' name='ds_cpl'/>
-    <feature policy='require' name='rdrand'/>
-  </cpu>
-  <clock offset='utc'/>
-  <on_poweroff>destroy</on_poweroff>
-  <on_reboot>restart</on_reboot>
-  <on_crash>restart</on_crash>
-  <devices>
-    <emulator>/usr/bin/kvm</emulator>
-    <disk type='file' device='disk'>
-      <driver name='qemu' type='raw'/>
-      <source file='disk.raw'/>
-      <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
-    </disk>
-    <controller type='usb' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
-    </controller>
-    <controller type='pci' index='0' model='pci-root'/>
-    <interface type='network'>
-      <source network='fuel1'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel2'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel3'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel4'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
-    </interface>
-    <serial type='pty'>
-      <target port='0'/>
-    </serial>
-    <console type='pty'>
-      <target type='serial' port='0'/>
-    </console>
-    <input type='mouse' bus='ps2'/>
-    <input type='keyboard' bus='ps2'/>
-    <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
-    <sound model='ich6'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
-    </sound>
-    <video>
-      <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
-    </video>
-    <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
-    </memballoon>
-  </devices>
-</domain>
-
diff --git a/fuel/deploy/libvirt/vms/s1_b5 b/fuel/deploy/libvirt/vms/s1_b5
deleted file mode 100644 (file)
index 97218c3..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-<domain type='kvm'>
-  <name>s1_b5</name>
-  <memory unit='KiB'>8388608</memory>
-  <currentMemory unit='KiB'>8388608</currentMemory>
-  <vcpu placement='static'>2</vcpu>
-  <os>
-    <type arch='x86_64' machine='pc-1.0'>hvm</type>
-    <boot dev='network'/>
-    <boot dev='hd'/>
-  </os>
-  <features>
-    <acpi/>
-    <apic/>
-    <pae/>
-  </features>
-  <cpu mode='custom' match='exact'>
-    <model fallback='allow'>SandyBridge</model>
-    <vendor>Intel</vendor>
-    <feature policy='require' name='vme'/>
-    <feature policy='require' name='dtes64'/>
-    <feature policy='require' name='vmx'/>
-    <feature policy='require' name='erms'/>
-    <feature policy='require' name='xtpr'/>
-    <feature policy='require' name='smep'/>
-    <feature policy='require' name='pcid'/>
-    <feature policy='require' name='est'/>
-    <feature policy='require' name='monitor'/>
-    <feature policy='require' name='smx'/>
-    <feature policy='require' name='tm'/>
-    <feature policy='require' name='acpi'/>
-    <feature policy='require' name='osxsave'/>
-    <feature policy='require' name='ht'/>
-    <feature policy='require' name='pdcm'/>
-    <feature policy='require' name='fsgsbase'/>
-    <feature policy='require' name='f16c'/>
-    <feature policy='require' name='ds'/>
-    <feature policy='require' name='tm2'/>
-    <feature policy='require' name='ss'/>
-    <feature policy='require' name='pbe'/>
-    <feature policy='require' name='ds_cpl'/>
-    <feature policy='require' name='rdrand'/>
-  </cpu>
-  <clock offset='utc'/>
-  <on_poweroff>destroy</on_poweroff>
-  <on_reboot>restart</on_reboot>
-  <on_crash>restart</on_crash>
-  <devices>
-    <emulator>/usr/bin/kvm</emulator>
-    <disk type='file' device='disk'>
-      <driver name='qemu' type='raw'/>
-      <source file='disk.raw'/>
-      <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
-    </disk>
-    <controller type='usb' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
-    </controller>
-    <controller type='pci' index='0' model='pci-root'/>
-    <interface type='network'>
-      <source network='fuel1'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel2'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel3'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel4'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
-    </interface>
-    <serial type='pty'>
-      <target port='0'/>
-    </serial>
-    <console type='pty'>
-      <target type='serial' port='0'/>
-    </console>
-    <input type='mouse' bus='ps2'/>
-    <input type='keyboard' bus='ps2'/>
-    <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
-    <sound model='ich6'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
-    </sound>
-    <video>
-      <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
-    </video>
-    <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
-    </memballoon>
-  </devices>
-</domain>
-
diff --git a/fuel/deploy/libvirt/vms/s1_b6 b/fuel/deploy/libvirt/vms/s1_b6
deleted file mode 100644 (file)
index 0cd3028..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-<domain type='kvm'>
-  <name>s1_b6</name>
-  <memory unit='KiB'>8388608</memory>
-  <currentMemory unit='KiB'>8388608</currentMemory>
-  <vcpu placement='static'>2</vcpu>
-  <os>
-    <type arch='x86_64' machine='pc-1.0'>hvm</type>
-    <boot dev='network'/>
-    <boot dev='hd'/>
-  </os>
-  <features>
-    <acpi/>
-    <apic/>
-    <pae/>
-  </features>
-  <cpu mode='custom' match='exact'>
-    <model fallback='allow'>SandyBridge</model>
-    <vendor>Intel</vendor>
-    <feature policy='require' name='vme'/>
-    <feature policy='require' name='dtes64'/>
-    <feature policy='require' name='vmx'/>
-    <feature policy='require' name='erms'/>
-    <feature policy='require' name='xtpr'/>
-    <feature policy='require' name='smep'/>
-    <feature policy='require' name='pcid'/>
-    <feature policy='require' name='est'/>
-    <feature policy='require' name='monitor'/>
-    <feature policy='require' name='smx'/>
-    <feature policy='require' name='tm'/>
-    <feature policy='require' name='acpi'/>
-    <feature policy='require' name='osxsave'/>
-    <feature policy='require' name='ht'/>
-    <feature policy='require' name='pdcm'/>
-    <feature policy='require' name='fsgsbase'/>
-    <feature policy='require' name='f16c'/>
-    <feature policy='require' name='ds'/>
-    <feature policy='require' name='tm2'/>
-    <feature policy='require' name='ss'/>
-    <feature policy='require' name='pbe'/>
-    <feature policy='require' name='ds_cpl'/>
-    <feature policy='require' name='rdrand'/>
-  </cpu>
-  <clock offset='utc'/>
-  <on_poweroff>destroy</on_poweroff>
-  <on_reboot>restart</on_reboot>
-  <on_crash>restart</on_crash>
-  <devices>
-    <emulator>/usr/bin/kvm</emulator>
-    <disk type='file' device='disk'>
-      <driver name='qemu' type='raw'/>
-      <source file='disk.raw'/>
-      <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
-    </disk>
-    <controller type='usb' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/>
-    </controller>
-    <controller type='pci' index='0' model='pci-root'/>
-    <interface type='network'>
-      <source network='fuel1'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel2'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel3'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/>
-    </interface>
-    <interface type='network'>
-      <source network='fuel4'/>
-      <model type='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/>
-    </interface>
-    <serial type='pty'>
-      <target port='0'/>
-    </serial>
-    <console type='pty'>
-      <target type='serial' port='0'/>
-    </console>
-    <input type='mouse' bus='ps2'/>
-    <input type='keyboard' bus='ps2'/>
-    <graphics type='vnc' port='-1' autoport='yes' keymap='sv'/>
-    <sound model='ich6'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
-    </sound>
-    <video>
-      <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
-    </video>
-    <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x08' function='0x0'/>
-    </memballoon>
-  </devices>
-</domain>
-
diff --git a/fuel/deploy/setup_environment.py b/fuel/deploy/setup_environment.py
new file mode 100644 (file)
index 0000000..4e0e7ba
--- /dev/null
@@ -0,0 +1,165 @@
+import sys
+from lxml import etree
+import os
+import glob
+import common
+
+from dha import DeploymentHardwareAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
+
+class LibvirtEnvironment(object):
+
+    def __init__(self, storage_dir, dha_file):
+        self.dha = DeploymentHardwareAdapter(dha_file)
+        self.storage_dir = storage_dir
+        self.parser = etree.XMLParser(remove_blank_text=True)
+        self.file_dir = os.path.dirname(os.path.realpath(__file__))
+        self.network_dir = '%s/libvirt/networks' % self.file_dir
+        self.vm_dir = '%s/libvirt/vms' % self.file_dir
+        self.node_ids = self.dha.get_all_node_ids()
+        self.fuel_node_id = self.dha.get_fuel_node_id()
+        self.net_names = self.collect_net_names()
+
+    def create_storage(self, node_id, disk_path, disk_sizes):
+        if node_id == self.fuel_node_id:
+            disk_size = disk_sizes['fuel']
+        else:
+            role = self.dha.get_node_role(node_id)
+            disk_size = disk_sizes[role]
+        exec_cmd('fallocate -l %s %s' % (disk_size, disk_path))
+
+    def create_vms(self):
+        temp_dir = exec_cmd('mktemp -d')
+        disk_sizes = self.dha.get_disks()
+        for node_id in self.node_ids:
+            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+            vm_template = self.dha.get_node_property(node_id,
+                                                     'libvirtTemplate')
+            disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
+            self.create_storage(node_id, disk_path, disk_sizes)
+            self.define_vm(vm_name, vm_template, temp_dir, disk_path)
+        exec_cmd('rm -fr %s' % temp_dir)
+
+    def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
+        log('Creating VM %s with disks %s' % (vm_name, disk_path))
+        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
+        exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
+        with open(temp_vm_file) as f:
+            vm_xml = etree.parse(f)
+            names = vm_xml.xpath('/domain/name')
+            for name in names:
+                name.text = vm_name
+            uuids = vm_xml.xpath('/domain/uuid')
+            for uuid in uuids:
+                uuid.getparent().remove(uuid)
+            disks = vm_xml.xpath('/domain/devices/disk')
+            for disk in disks:
+                sources = disk.xpath('source')
+                for source in sources:
+                    source.set('file', disk_path)
+        with open(temp_vm_file, 'w') as f:
+            vm_xml.write(f, pretty_print=True, xml_declaration=True)
+        exec_cmd('virsh define %s' % temp_vm_file)
+
+    def create_networks(self):
+        for net_file in glob.glob('%s/*' % self.network_dir):
+            exec_cmd('virsh net-define %s' % net_file)
+        for net in self.net_names:
+            log('Creating network %s' % net)
+            exec_cmd('virsh net-autostart %s' % net)
+            exec_cmd('virsh net-start %s' % net)
+
+    def delete_networks(self):
+        for net in self.net_names:
+            log('Deleting network %s' % net)
+            exec_cmd('virsh net-destroy %s' % net, False)
+            exec_cmd('virsh net-undefine %s' % net, False)
+
+    def get_net_name(self, net_file):
+        with open(net_file) as f:
+            net_xml = etree.parse(f)
+            name_list = net_xml.xpath('/network/name')
+            for name in name_list:
+                net_name = name.text
+        return net_name
+
+    def collect_net_names(self):
+        net_list = []
+        for net_file in glob.glob('%s/*' % self.network_dir):
+            name = self.get_net_name(net_file)
+            net_list.append(name)
+        return net_list
+
+    def delete_vms(self):
+        for node_id in self.node_ids:
+            vm_name = self.dha.get_node_property(node_id, 'libvirtName')
+            r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
+            if c > 0:
+                log(r)
+                continue
+            self.undefine_vm_delete_disk(r, vm_name)
+
+    def undefine_vm_delete_disk(self, printout, vm_name):
+        disk_files = []
+        xml_dump = etree.fromstring(printout, self.parser)
+        disks = xml_dump.xpath('/domain/devices/disk')
+        for disk in disks:
+            sources = disk.xpath('source')
+            for source in sources:
+                source_file = source.get('file')
+                if source_file:
+                    disk_files.append(source_file)
+        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
+        exec_cmd('virsh destroy %s' % vm_name, False)
+        exec_cmd('virsh undefine %s' % vm_name, False)
+        for file in disk_files:
+            exec_cmd('rm -f %s' % file)
+
+    def setup_environment(self):
+        check_if_root()
+        check_dir_exists(self.network_dir)
+        check_dir_exists(self.vm_dir)
+        self.cleanup_environment()
+        self.create_vms()
+        self.create_networks()
+
+    def cleanup_environment(self):
+        self.delete_vms()
+        self.delete_networks()
+
+
+def usage():
+    print '''
+    Usage:
+    python setup_environment.py <storage_directory> <dha_file>
+
+    Example:
+            python setup_environment.py /mnt/images dha.yaml
+    '''
+
+def parse_arguments():
+    if len(sys.argv) != 3:
+        log('Incorrect number of arguments')
+        usage()
+        sys.exit(1)
+    storage_dir = sys.argv[-2]
+    dha_file = sys.argv[-1]
+    check_dir_exists(storage_dir)
+    check_file_exists(dha_file)
+    return storage_dir, dha_file
+
+def main():
+    storage_dir, dha_file = parse_arguments()
+
+    virt = LibvirtEnvironment(storage_dir, dha_file)
+    virt.setup_environment()
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file
diff --git a/fuel/deploy/setup_vfuel.py b/fuel/deploy/setup_vfuel.py
new file mode 100644 (file)
index 0000000..65ee013
--- /dev/null
@@ -0,0 +1,143 @@
+import sys
+from lxml import etree
+import os
+
+import common
+from dha import DeploymentHardwareAdapter
+
+exec_cmd = common.exec_cmd
+err = common.err
+log = common.log
+check_dir_exists = common.check_dir_exists
+check_file_exists = common.check_file_exists
+check_if_root = common.check_if_root
+
# interfaces(5) stanza for the bridge that connects the vFuel VM to the
# host; the iptables pre-down/post-up rules masquerade its traffic out
# through p1p1.20.
VFUELNET = '''
iface vfuelnet inet static
        bridge_ports em1
        address 10.40.0.1
        netmask 255.255.255.0
        pre-down iptables -t nat -D POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
        pre-down iptables -D FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
        post-up iptables -t nat -A POSTROUTING --out-interface p1p1.20 -j MASQUERADE  -m comment --comment "vfuelnet"
        post-up iptables -A FORWARD --in-interface vfuelnet --out-interface p1p1.20 -m comment --comment "vfuelnet"
'''
# Directory (relative to this file) holding the libvirt VM XML templates.
VM_DIR = 'baremetal/vm'
# Size of the raw disk image allocated for the Fuel master VM.
FUEL_DISK_SIZE = '30G'
# Name of the bridge interface defined by the VFUELNET stanza.
IFACE = 'vfuelnet'
# Network configuration file the VFUELNET stanza is appended to.
INTERFACE_CONFIG = '/etc/network/interfaces'
+
class VFuel(object):
    """Set up the virtual Fuel master: host networking plus a libvirt VM
    instantiated from an XML template in the baremetal/vm directory."""

    def __init__(self, storage_dir, dha_file):
        # storage_dir: where the raw disk image for the VM is allocated.
        self.dha = DeploymentHardwareAdapter(dha_file)
        self.storage_dir = storage_dir
        self.parser = etree.XMLParser(remove_blank_text=True)
        self.fuel_node_id = self.dha.get_fuel_node_id()
        self.file_dir = os.path.dirname(os.path.realpath(__file__))
        self.vm_dir = '%s/%s' % (self.file_dir, VM_DIR)

    def setup_environment(self):
        """Configure host networking and (re)create the vFuel VM.

        Must run as root (virsh, ifup/ifdown, writing to /etc/network).
        """
        check_if_root()
        check_dir_exists(self.vm_dir)
        self.setup_networking()
        # Remove any previous instance so re-runs start from a clean state.
        self.delete_vm()
        self.create_vm()

    def setup_networking(self):
        """Append the vfuelnet stanza to the interfaces file (only once)
        and cycle the interface so the bridge comes up."""
        with open(INTERFACE_CONFIG) as f:
            data = f.read()
        if VFUELNET not in data:
            log('Appending to file %s:\n %s' % (INTERFACE_CONFIG, VFUELNET))
            with open(INTERFACE_CONFIG, 'a') as f:
                f.write('\n%s\n' % VFUELNET)
            # If the interface already exists, take it down before ifup so
            # the new configuration is applied.
            if exec_cmd('ip link show | grep %s' % IFACE):
                log('Bring DOWN interface %s' % IFACE)
                exec_cmd('ifdown %s' % IFACE, False)
            log('Bring UP interface %s' % IFACE)
            exec_cmd('ifup %s' % IFACE, False)

    def delete_vm(self):
        """Delete the Fuel VM and its disks, if the domain exists."""
        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
        r, c = exec_cmd('virsh dumpxml %s' % vm_name, False)
        if c > 0:
            # Non-zero exit: domain is not defined, nothing to delete.
            log(r)
            return
        self.undefine_vm_delete_disk(r, vm_name)

    def undefine_vm_delete_disk(self, printout, vm_name):
        """Destroy and undefine the domain, then remove its disk images.

        :param printout: XML dump of the domain (``virsh dumpxml`` output)
        :param vm_name:  libvirt domain name
        """
        disk_files = []
        xml_dump = etree.fromstring(printout, self.parser)
        for disk in xml_dump.xpath('/domain/devices/disk'):
            for source in disk.xpath('source'):
                source_file = source.get('file')
                if source_file:
                    disk_files.append(source_file)
        log('Deleting VM %s with disks %s' % (vm_name, disk_files))
        # Best-effort (check=False): the domain may already be shut down.
        exec_cmd('virsh destroy %s' % vm_name, False)
        exec_cmd('virsh undefine %s' % vm_name, False)
        # 'disk_file' instead of 'file': avoid shadowing the builtin.
        for disk_file in disk_files:
            exec_cmd('rm -f %s' % disk_file)

    def create_vm(self):
        """Allocate the VM disk and define the domain from its template."""
        temp_dir = exec_cmd('mktemp -d')
        vm_name = self.dha.get_node_property(self.fuel_node_id, 'libvirtName')
        vm_template = self.dha.get_node_property(self.fuel_node_id,
                                                 'libvirtTemplate')
        disk_path = '%s/%s.raw' % (self.storage_dir, vm_name)
        exec_cmd('fallocate -l %s %s' % (FUEL_DISK_SIZE, disk_path))
        self.define_vm(vm_name, vm_template, temp_dir, disk_path)
        exec_cmd('rm -fr %s' % temp_dir)

    def define_vm(self, vm_name, vm_template, temp_dir, disk_path):
        """Instantiate the XML template and ``virsh define`` the result:
        set the domain name, drop any <uuid> (libvirt generates a fresh
        one) and point every disk source at *disk_path*."""
        log('Creating VM %s with disks %s' % (vm_name, disk_path))
        temp_vm_file = '%s/%s' % (temp_dir, vm_name)
        exec_cmd('cp %s/%s %s' % (self.vm_dir, vm_template, temp_vm_file))
        with open(temp_vm_file) as f:
            vm_xml = etree.parse(f)
            for name in vm_xml.xpath('/domain/name'):
                name.text = vm_name
            for uuid in vm_xml.xpath('/domain/uuid'):
                uuid.getparent().remove(uuid)
            for disk in vm_xml.xpath('/domain/devices/disk'):
                for source in disk.xpath('source'):
                    source.set('file', disk_path)
        with open(temp_vm_file, 'w') as f:
            vm_xml.write(f, pretty_print=True, xml_declaration=True)
        exec_cmd('virsh define %s' % temp_vm_file)
+
+
+def usage():
+    print '''
+    Usage:
+    python setup_vfuel.py <storage_directory> <dha_file>
+
+    Example:
+            python setup_vfuel.py /mnt/images dha.yaml
+    '''
+
def parse_arguments():
    """Validate sys.argv and return (storage_dir, dha_file)."""
    if len(sys.argv) != 3:
        log('Incorrect number of arguments')
        usage()
        sys.exit(1)
    # Exactly three argv entries here, so the last two are the user args.
    storage_dir, dha_file = sys.argv[-2:]
    check_dir_exists(storage_dir)
    check_file_exists(dha_file)
    return storage_dir, dha_file
+
def main():
    # Entry point: validate CLI arguments, then set up host networking and
    # the virtual Fuel master VM described by the DHA file.
    storage_dir, dha_file = parse_arguments()

    vfuel = VFuel(storage_dir, dha_file)
    vfuel.setup_environment()
+
+if __name__ == '__main__':
+    main()
diff --git a/fuel/deploy/setup_vms/apply_setup.sh b/fuel/deploy/setup_vms/apply_setup.sh
deleted file mode 100755 (executable)
index b38cf5d..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-##############################################################################
-# Copyright (c) 2015 Ericsson AB and others.
-# stefan.k.berg@ericsson.com
-# jonas.bjurel@ericsson.com
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-error_exit () {
-  echo "$@"
-  exit 1
-}
-
-netdir='../libvirt/networks'
-vmdir='../libvirt/vms'
-tmpfile=/tmp/foo
-
-if [ ! -d $netdir ]; then
-  error_exit "No net directory $netdir"
-  exit 1
-elif [ ! -d $vmdir ]; then
-  error_exit "No VM directory $vmdir"
-  exit 1
-fi
-
-if [ $# -ne 2 ]; then
-  echo "Argument error."
-  echo "`basename $0` <path to storage dir> <size in GB of disk per VM>"
-  exit 1
-fi
-
-storagedir=$1
-size=$2
-
-if [ ! -d $storagedir ]; then
-  error_exit "Could not find storagedir directory $storagedir"
-fi
-
-# Create storage space and patch it in
-for vm in $vmdir/*
-do
-  storage=${storagedir}/`basename ${vm}`.raw
-  if [ -f ${storage} ]; then
-     error_exit "Storage already present: ${storage}"
-  fi
-  echo "Creating ${size} GB of storage in ${storage}"
-  fallocate -l ${size}G ${storage} || \
-    error_exit "Could not create storage"
-  sed "s:<source file='disk.raw':<source file='${storage}':" $vm >$tmpfile
-  virsh define $tmpfile
-done
-
-for net in $netdir/*
-do
-  virsh net-define $net
-  virsh net-autostart `basename $net`
-  virsh net-start `basename $net`
-done
similarity index 61%
rename from fuel/deploy/cloud_deploy/ssh_client.py
rename to fuel/deploy/ssh_client.py
index b9aad6c..9ea227a 100644 (file)
@@ -1,8 +1,10 @@
 import paramiko
-from cloud import common
+import common
+import scp
 
 TIMEOUT = 600
-LOG = common.LOG
+log = common.log
+err = common.err
 
 class SSHClient(object):
 
@@ -23,7 +25,14 @@ class SSHClient(object):
             self.client.close()
             self.client = None
 
-    def execute(self, command, sudo=False, timeout=TIMEOUT):
+    def __enter__(self):
+        self.open()
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def exec_cmd(self, command, sudo=False, timeout=TIMEOUT, check=True):
         if sudo and self.username != 'root':
             command = "sudo -S -p '' %s" % command
         stdin, stdout, stderr = self.client.exec_command(command,
@@ -31,15 +40,22 @@ class SSHClient(object):
         if sudo:
             stdin.write(self.password + '\n')
             stdin.flush()
-        return ''.join(''.join(stderr.readlines()) +
-                       ''.join(stdout.readlines()))
+        response = stdout.read().strip()
+        error = stderr.read().strip()
+
+        if check:
+            if error:
+                self.close()
+                err(error)
+            else:
+                return response
+        return response, error
 
     def run(self, command):
         transport = self.client.get_transport()
         transport.set_keepalive(1)
         chan = transport.open_session()
         chan.exec_command(command)
-
         while not chan.exit_status_ready():
             if chan.recv_ready():
                 data = chan.recv(1024)
@@ -53,4 +69,18 @@ class SSHClient(object):
                     print error_buff
                     error_buff = chan.recv_stderr(1024)
         exit_status = chan.recv_exit_status()
-        LOG.debug('Exit status %s' % exit_status)
\ No newline at end of file
+        log('Exit status %s' % exit_status)
+
+    def scp_get(self, remote, local='.', dir=False):
+        try:
+            with scp.SCPClient(self.client.get_transport()) as _scp:
+                _scp.get(remote, local, dir)
+        except Exception as e:
+            err(e)
+
+    def scp_put(self, local, remote='.', dir=False):
+        try:
+            with scp.SCPClient(self.client.get_transport()) as _scp:
+                _scp.put(local, remote, dir)
+        except Exception as e:
+            err(e)
diff --git a/fuel/deploy/transplant_fuel_settings.py b/fuel/deploy/transplant_fuel_settings.py
new file mode 100644 (file)
index 0000000..bb4f9b6
--- /dev/null
@@ -0,0 +1,46 @@
+import sys
+import common
+import io
+import yaml
+from dea import DeploymentEnvironmentAdapter
+
+check_file_exists = common.check_file_exists
+
+def usage():
+    print '''
+    Usage:
+    python transplant_fuel_settings.py <deafile>
+    '''
+
def parse_arguments():
    """Validate sys.argv and return the path to the DEA file."""
    if len(sys.argv) != 2:
        usage()
        sys.exit(1)
    dea_path = sys.argv[-1]
    check_file_exists(dea_path)
    return dea_path
+
def transplant(dea, astute):
    """Merge the Fuel configuration from the DEA into the astute settings.

    Top-level keys from the DEA fuel section overwrite the corresponding
    astute keys, except ADMIN_NETWORK, which is merged one sub-key at a
    time so astute sub-keys absent from the DEA are preserved.
    *astute* is modified in place and also returned.
    """
    fuel_conf = dea.get_fuel_config()
    # Plain iteration instead of iterkeys(): identical behavior, and it
    # does not break under Python 3.
    for key in fuel_conf:
        if key == 'ADMIN_NETWORK':
            # Create the section if astute lacks it, instead of raising
            # KeyError on astute[key].
            admin = astute.setdefault(key, {})
            for skey in fuel_conf[key]:
                admin[skey] = fuel_conf[key][skey]
        else:
            astute[key] = fuel_conf[key]
    return astute
+
def main():
    # Inject the Fuel settings from the DEA file into the Fuel master's
    # astute.yaml (this script is meant to run on the Fuel master).
    dea_file = parse_arguments()
    astute_yaml = '/etc/fuel/astute.yaml'
    check_file_exists(astute_yaml)
    dea = DeploymentEnvironmentAdapter(dea_file)
    with io.open(astute_yaml) as stream:
        # NOTE(review): yaml.load without an explicit Loader; astute.yaml
        # is generated locally by Fuel, but safe_load would be preferable.
        astute = yaml.load(stream)
    # transplant() mutates 'astute' in place, so the discarded return
    # value is not a bug.
    transplant(dea, astute)
    with io.open(astute_yaml, 'w') as stream:
        # NOTE(review): on Python 2, yaml.dump emits str while io.open
        # gives a unicode stream — confirm this does not raise TypeError.
        yaml.dump(astute, stream, default_flow_style=False)
+
+
+if __name__ == '__main__':
+    main()
\ No newline at end of file