From 9c2b6848566a0b80bb44f27cca155a240d69f061 Mon Sep 17 00:00:00 2001 From: Szilard Cserey Date: Tue, 14 Apr 2015 15:47:45 +0200 Subject: [PATCH] Automatic Deployment - node discovery - refactoring to support multiple shelves - configure nodes and interfaces - provisioning - deployment - extending with autodeployment scripts from libvirt prototype JIRA: [BGS-2] Create Fuel deployment scrip Signed-off-by: Szilard Cserey Change-Id: Ic48f93594914d5bef6c9de34d87434c7cd567198 --- fuel/deploy/README.rst | 47 + fuel/deploy/cloud_deploy/__init__.py | 1 + fuel/deploy/cloud_deploy/cloud/__init__.py | 1 + fuel/deploy/cloud_deploy/cloud/common.py | 51 ++ .../cloud_deploy/cloud/configure_environment.py | 74 ++ .../deploy/cloud_deploy/cloud/configure_network.py | 62 ++ fuel/deploy/cloud_deploy/cloud/configure_nodes.py | 108 +++ .../cloud_deploy/cloud/configure_settings.py | 47 + fuel/deploy/{ => cloud_deploy/cloud}/dea.py | 56 +- fuel/deploy/cloud_deploy/cloud/deploy.py | 208 +++++ fuel/deploy/cloud_deploy/cloud/deployment.py | 100 +++ fuel/deploy/cloud_deploy/cloud_deploy.py | 117 +++ .../cloud_deploy/hardware_adapters/__init__.py | 1 + .../{ => cloud_deploy/hardware_adapters}/dha.py | 5 +- .../cloud_deploy/hardware_adapters/hp/__init__.py | 1 + .../hardware_adapters/hp/hp_adapter.py | 54 +- .../hardware_adapters/hp/run_oa_command.py | 9 +- .../hardware_adapters/libvirt/__init__.py | 1 + .../hardware_adapters/libvirt/libvirt_adapter.py | 153 ++++ fuel/deploy/cloud_deploy/ssh_client.py | 56 ++ fuel/deploy/common.py | 29 - fuel/deploy/configure_environment.py | 70 -- fuel/deploy/configure_network.py | 91 -- fuel/deploy/configure_settings.py | 88 -- fuel/deploy/dea.yaml | 958 ++++++++++++++++++++- fuel/deploy/deploy.py | 212 ----- fuel/deploy/deploy.sh | 107 +++ fuel/deploy/deploy_fuel.sh | 106 --- fuel/deploy/functions/common.sh | 109 +++ fuel/deploy/functions/install_iso.sh | 62 ++ fuel/deploy/functions/isolinux.cfg.patch | 14 + fuel/deploy/functions/ks.cfg.patch | 19 + 
fuel/deploy/functions/patch-iso.sh | 69 ++ fuel/deploy/libvirt/networks/fuel1 | 12 + fuel/deploy/libvirt/networks/fuel2 | 5 + fuel/deploy/libvirt/networks/fuel3 | 5 + fuel/deploy/libvirt/networks/fuel4 | 12 + fuel/deploy/libvirt/vms/fuel-master | 95 ++ fuel/deploy/libvirt/vms/s1_b1 | 100 +++ fuel/deploy/libvirt/vms/s1_b2 | 100 +++ fuel/deploy/libvirt/vms/s1_b3 | 100 +++ fuel/deploy/libvirt/vms/s1_b4 | 101 +++ fuel/deploy/libvirt/vms/s1_b5 | 100 +++ fuel/deploy/libvirt/vms/s1_b6 | 100 +++ fuel/deploy/setup_vms/apply_setup.sh | 61 ++ fuel/deploy/setup_vms/setup-vm-host.sh | 17 + 46 files changed, 3228 insertions(+), 666 deletions(-) create mode 100644 fuel/deploy/README.rst create mode 100644 fuel/deploy/cloud_deploy/__init__.py create mode 100644 fuel/deploy/cloud_deploy/cloud/__init__.py create mode 100644 fuel/deploy/cloud_deploy/cloud/common.py create mode 100644 fuel/deploy/cloud_deploy/cloud/configure_environment.py create mode 100644 fuel/deploy/cloud_deploy/cloud/configure_network.py create mode 100644 fuel/deploy/cloud_deploy/cloud/configure_nodes.py create mode 100644 fuel/deploy/cloud_deploy/cloud/configure_settings.py rename fuel/deploy/{ => cloud_deploy/cloud}/dea.py (52%) create mode 100644 fuel/deploy/cloud_deploy/cloud/deploy.py create mode 100644 fuel/deploy/cloud_deploy/cloud/deployment.py create mode 100644 fuel/deploy/cloud_deploy/cloud_deploy.py create mode 100644 fuel/deploy/cloud_deploy/hardware_adapters/__init__.py rename fuel/deploy/{ => cloud_deploy/hardware_adapters}/dha.py (94%) create mode 100644 fuel/deploy/cloud_deploy/hardware_adapters/hp/__init__.py rename fuel/deploy/{ => cloud_deploy}/hardware_adapters/hp/hp_adapter.py (90%) rename fuel/deploy/{ => cloud_deploy}/hardware_adapters/hp/run_oa_command.py (93%) create mode 100644 fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py create mode 100644 fuel/deploy/cloud_deploy/hardware_adapters/libvirt/libvirt_adapter.py create mode 100644 fuel/deploy/cloud_deploy/ssh_client.py 
delete mode 100644 fuel/deploy/common.py delete mode 100644 fuel/deploy/configure_environment.py delete mode 100644 fuel/deploy/configure_network.py delete mode 100644 fuel/deploy/configure_settings.py delete mode 100644 fuel/deploy/deploy.py create mode 100755 fuel/deploy/deploy.sh delete mode 100755 fuel/deploy/deploy_fuel.sh create mode 100755 fuel/deploy/functions/common.sh create mode 100755 fuel/deploy/functions/install_iso.sh create mode 100644 fuel/deploy/functions/isolinux.cfg.patch create mode 100644 fuel/deploy/functions/ks.cfg.patch create mode 100755 fuel/deploy/functions/patch-iso.sh create mode 100644 fuel/deploy/libvirt/networks/fuel1 create mode 100644 fuel/deploy/libvirt/networks/fuel2 create mode 100644 fuel/deploy/libvirt/networks/fuel3 create mode 100644 fuel/deploy/libvirt/networks/fuel4 create mode 100644 fuel/deploy/libvirt/vms/fuel-master create mode 100644 fuel/deploy/libvirt/vms/s1_b1 create mode 100644 fuel/deploy/libvirt/vms/s1_b2 create mode 100644 fuel/deploy/libvirt/vms/s1_b3 create mode 100644 fuel/deploy/libvirt/vms/s1_b4 create mode 100644 fuel/deploy/libvirt/vms/s1_b5 create mode 100644 fuel/deploy/libvirt/vms/s1_b6 create mode 100755 fuel/deploy/setup_vms/apply_setup.sh create mode 100755 fuel/deploy/setup_vms/setup-vm-host.sh diff --git a/fuel/deploy/README.rst b/fuel/deploy/README.rst new file mode 100644 index 0000000..f7b5711 --- /dev/null +++ b/fuel/deploy/README.rst @@ -0,0 +1,47 @@ +**DEA libvirt deployment prototype** + +This is an example of how to deploy a libvirt KVM setup with a DEA +YAML file. + +The file is created from an already deployed Fuel installation using +the create_dea script and helper files which are to be present on the +Fuel master and run from there. + +The install is kicked off from the host by running deploy.sh and +providing the ISO file to deploy and (optionally) a DEA file name as +an argument. If the DEA file is omitted the example one will be used +instead. 
+ +Pre-condition 1: The host needs to be Ubuntu 14.x + +Pre-condition 2: Necessary packages installed by running +sudo genesis/fuel/prototypes/libvirt/setup_vms/setup-vm-host.sh + +Pre-condition 3: Example VM configuration deployed by running +genesis/fuel/prototypes/libvirt/setup_vms/apply_setup.sh The VMs and +networks to be set up are in genesis/fuel/prototypes/libvirt/examples: +"vms" and "networks" +sudo mkdir /mnt/images +cd setup-vms +sudo ./apply_setup.sh /mnt/images 50 + +In order to run the automated install, it's just a matter of running +genesis/fuel/prototypes/libvirt/deploy.sh <isofile> [<deafile>] The +deafile is optional; if not specified, the example one in +genesis/fuel/prototypes/libvirt/examples/libvirt_dea.yaml will be +used. +sudo ./deploy.sh ~/ISO/opnfv-P0000.iso ~/DEPLOY/deploy/dea.yaml + +Now either this will succeed (return code 0) or fail. There is a +three-hour safety catch to kill off things if something is hanging; it +may need to be adjusted for slow environments (see deploy.sh). + +All the steps above should be run with sudo. + +In principle the deploy.sh is assuming the example vm setup (one fuel, +three controllers, two computes) and will always deploy with full HA +and Ceilometer. + +TODO: Copy also the deployment mode in my dea.yaml creation script +genesis/fuel/prototypes/libvirt/create_dea/create_dea.sh so it's a +real xerox of the running deploy. 
diff --git a/fuel/deploy/cloud_deploy/__init__.py b/fuel/deploy/cloud_deploy/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ b/fuel/deploy/cloud_deploy/__init__.py @@ -0,0 +1 @@ +__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/cloud/__init__.py b/fuel/deploy/cloud_deploy/cloud/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/__init__.py @@ -0,0 +1 @@ +__author__ = 'eszicse' diff --git a/fuel/deploy/cloud_deploy/cloud/common.py b/fuel/deploy/cloud_deploy/cloud/common.py new file mode 100644 index 0000000..365f6fb --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/common.py @@ -0,0 +1,51 @@ +import subprocess +import sys +import os +import logging + +N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5, + 'roles': 6, 'pending_roles': 7, 'online': 8} +E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4, + 'changes': 5, 'pending_release_id': 6} +R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4} +RO = {'name': 0, 'conflicts': 1} + +LOG = logging.getLogger(__name__) +LOG.setLevel(logging.DEBUG) +formatter = logging.Formatter('%(message)s') +out_handler = logging.StreamHandler(sys.stdout) +out_handler.setFormatter(formatter) +LOG.addHandler(out_handler) +out_handler = logging.FileHandler('autodeploy.log', mode='w') +out_handler.setFormatter(formatter) +LOG.addHandler(out_handler) + +def exec_cmd(cmd): + process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + shell=True) + return process.communicate()[0], process.returncode + +def run_proc(cmd): + process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + shell=True) + return process + +def parse(printout, *args): + parsed_list = [] + lines = printout[0].splitlines() + for l in lines[2:]: + parsed = [e.strip() for e in l.split('|')] + parsed_list.append(parsed) + return parsed_list + +def err(error_message): + 
LOG.error(error_message) + sys.exit(1) + +def check_file_exists(file_path): + if not os.path.isfile(file_path): + err('ERROR: File %s not found\n' % file_path) diff --git a/fuel/deploy/cloud_deploy/cloud/configure_environment.py b/fuel/deploy/cloud_deploy/cloud/configure_environment.py new file mode 100644 index 0000000..426bbd1 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_environment.py @@ -0,0 +1,74 @@ +import common +import os +import shutil + +from configure_settings import ConfigureSettings +from configure_network import ConfigureNetwork +from configure_nodes import ConfigureNodes + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +LOG = common.LOG + +class ConfigureEnvironment(object): + + def __init__(self, dea, yaml_config_dir, release_id, node_id_roles_dict): + self.env_id = None + self.dea = dea + self.yaml_config_dir = yaml_config_dir + self.env_name = dea.get_environment_name() + self.release_id = release_id + self.node_id_roles_dict = node_id_roles_dict + self.required_networks = [] + + def env_exists(self, env_name): + env_list = parse(exec_cmd('fuel env --list')) + for env in env_list: + if env[E['name']] == env_name and env[E['status']] == 'new': + self.env_id = env[E['id']] + return True + return False + + def configure_environment(self): + LOG.debug('Configure environment\n') + if os.path.exists(self.yaml_config_dir): + LOG.debug('Deleting existing config directory %s\n' + % self.yaml_config_dir) + shutil.rmtree(self.yaml_config_dir) + LOG.debug('Creating new config directory %s\n' % self.yaml_config_dir) + os.makedirs(self.yaml_config_dir) + + LOG.debug('Creating environment %s release %s, mode ha, network-mode ' + 'neutron, net-segment-type vlan\n' + % (self.env_name, self.release_id)) + exec_cmd('fuel env create --name %s --release %s --mode ha ' + '--network-mode neutron --net-segment-type vlan' + % (self.env_name, self.release_id)) + + if not 
self.env_exists(self.env_name): + err("Failed to create environment %s\n" % self.env_name) + self.config_settings() + self.config_network() + self.config_nodes() + + def config_settings(self): + settings = ConfigureSettings(self.yaml_config_dir, self.env_id, + self.dea) + settings.config_settings() + + def config_network(self): + network = ConfigureNetwork(self.yaml_config_dir, self.env_id, self.dea) + network.config_network() + + def config_nodes(self): + nodes = ConfigureNodes(self.yaml_config_dir, self.env_id, + self.node_id_roles_dict, self.dea) + nodes.config_nodes() + + + diff --git a/fuel/deploy/cloud_deploy/cloud/configure_network.py b/fuel/deploy/cloud_deploy/cloud/configure_network.py new file mode 100644 index 0000000..f4d6f87 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_network.py @@ -0,0 +1,62 @@ +import common +import yaml +import io + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + +class ConfigureNetwork(object): + + def __init__(self, yaml_config_dir, env_id, dea): + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.dea = dea + self.required_networks = [] + + def download_network_config(self): + LOG.debug('Download network config for environment %s\n' % self.env_id) + exec_cmd('fuel network --env %s --download --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def upload_network_config(self): + LOG.debug('Upload network config for environment %s\n' % self.env_id) + exec_cmd('fuel network --env %s --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def config_network(self): + LOG.debug('Configure network\n') + self.download_network_config() + self.modify_network_config() + self.upload_network_config() + + def modify_network_config(self): + LOG.debug('Modify network config for environment %s\n' % self.env_id) + network_yaml = (self.yaml_config_dir + 
'/network_%s.yaml' + % self.env_id) + check_file_exists(network_yaml) + + network_config = self.dea.get_networks() + + + with io.open(network_yaml) as stream: + network = yaml.load(stream) + + net_names = self.dea.get_network_names() + net_id = {} + for net in network['networks']: + if net['name'] in net_names: + net_id[net['name']] = {'id': net['id'], + 'group_id': net['group_id']} + + for network in network_config['networks']: + network.update(net_id[network['name']]) + + with io.open(network_yaml, 'w') as stream: + yaml.dump(network_config, stream, default_flow_style=False) \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/configure_nodes.py b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py new file mode 100644 index 0000000..a5e24a8 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_nodes.py @@ -0,0 +1,108 @@ +import common +import yaml +import io +import glob + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + + +class ConfigureNodes(object): + + def __init__(self, yaml_config_dir, env_id, node_id_roles_dict, dea): + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.node_id_roles_dict = node_id_roles_dict + self.dea = dea + + def config_nodes(self): + LOG.debug('Configure nodes\n') + for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems(): + exec_cmd('fuel node set --node-id %s --role %s --env %s' + % (node_id, ','.join(roles_shelf_blade[0]), self.env_id)) + + self.download_deployment_config() + self.modify_node_network_schemes() + self.upload_deployment_config() + + for node_id, roles_shelf_blade in self.node_id_roles_dict.iteritems(): + self.download_interface_config(node_id) + self.modify_node_interface(node_id) + self.upload_interface_config(node_id) + + def modify_node_network_schemes(self): + LOG.debug('Modify node network schemes in environment 
%s\n' % self.env_id) + for node_file in glob.glob('%s/deployment_%s/*.yaml' + % (self.yaml_config_dir, self.env_id)): + check_file_exists(node_file) + + if 'compute' in node_file: + node_type = 'compute' + else: + node_type = 'controller' + + network_scheme = self.dea.get_network_scheme(node_type) + + with io.open(node_file) as stream: + node = yaml.load(stream) + + node['network_scheme']['transformations'] = network_scheme + + with io.open(node_file, 'w') as stream: + yaml.dump(node, stream, default_flow_style=False) + + + def download_deployment_config(self): + LOG.debug('Download deployment config for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel deployment --env %s --default --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def upload_deployment_config(self): + LOG.debug('Upload deployment config for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel deployment --env %s --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def download_interface_config(self, node_id): + LOG.debug('Download interface config for node %s\n' % node_id) + r, c = exec_cmd('fuel node --env %s --node %s --network --download ' + '--dir %s' % (self.env_id, node_id, + self.yaml_config_dir)) + + def upload_interface_config(self, node_id): + LOG.debug('Upload interface config for node %s\n' % node_id) + r, c = exec_cmd('fuel node --env %s --node %s --network --upload ' + '--dir %s' % (self.env_id, node_id, + self.yaml_config_dir)) + + def modify_node_interface(self, node_id): + LOG.debug('Modify interface config for node %s\n' % node_id) + interface_yaml = (self.yaml_config_dir + '/node_%s/interfaces.yaml' + % node_id) + + with io.open(interface_yaml) as stream: + interfaces = yaml.load(stream) + + net_name_id = {} + for interface in interfaces: + for network in interface['assigned_networks']: + net_name_id[network['name']] = network['id'] + + interface_config = self.dea.get_interfaces() + + for interface in interfaces: + interface['assigned_networks'] = [] + 
for net_name in interface_config[interface['name']]: + net = {} + net['id'] = net_name_id[net_name] + net['name'] = net_name + interface['assigned_networks'].append(net) + + with io.open(interface_yaml, 'w') as stream: + yaml.dump(interfaces, stream, default_flow_style=False) \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/configure_settings.py b/fuel/deploy/cloud_deploy/cloud/configure_settings.py new file mode 100644 index 0000000..3a3e4d5 --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/configure_settings.py @@ -0,0 +1,47 @@ +import common +import yaml +import io + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + +class ConfigureSettings(object): + + def __init__(self, yaml_config_dir, env_id, dea): + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.dea = dea + + def download_settings(self): + LOG.debug('Download settings for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel settings --env %s --download --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def upload_settings(self): + LOG.debug('Upload settings for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel settings --env %s --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + + def config_settings(self): + LOG.debug('Configure settings\n') + self.download_settings() + self.modify_settings() + self.upload_settings() + + def modify_settings(self): + LOG.debug('Modify settings for environment %s\n' % self.env_id) + settings_yaml = (self.yaml_config_dir + '/settings_%s.yaml' + % self.env_id) + check_file_exists(settings_yaml) + + settings = self.dea.get_settings() + + with io.open(settings_yaml, 'w') as stream: + yaml.dump(settings, stream, default_flow_style=False) diff --git a/fuel/deploy/dea.py b/fuel/deploy/cloud_deploy/cloud/dea.py similarity index 52% rename from fuel/deploy/dea.py rename to 
fuel/deploy/cloud_deploy/cloud/dea.py index 5f306a2..295636a 100644 --- a/fuel/deploy/dea.py +++ b/fuel/deploy/cloud_deploy/cloud/dea.py @@ -7,13 +7,15 @@ class DeploymentEnvironmentAdapter(object): self.blade_ids_per_shelves = {} self.blades_per_shelves = {} self.shelf_ids = [] - self.networks = {} + self.info_per_shelves = {} + self.network_names = [] def parse_yaml(self, yaml_path): with io.open(yaml_path) as yaml_file: self.dea_struct = yaml.load(yaml_file) self.collect_shelf_and_blade_info() - self.collect_network_info() + self.collect_shelf_info() + self.collect_network_names() def get_no_of_blades(self): no_of_blades = 0 @@ -21,14 +23,16 @@ class DeploymentEnvironmentAdapter(object): no_of_blades += len(shelf['blade']) return no_of_blades - def get_server_type(self): - return self.dea_struct['server']['type'] + def collect_shelf_info(self): + self.info_per_shelves = {} + for shelf in self.dea_struct['shelf']: + self.info_per_shelves[shelf['id']] = shelf - def get_server_info(self): - return (self.dea_struct['server']['type'], - self.dea_struct['server']['mgmt_ip'], - self.dea_struct['server']['username'], - self.dea_struct['server']['password']) + def get_shelf_info(self, shelf): + return (self.info_per_shelves[shelf]['type'], + self.info_per_shelves[shelf]['mgmt_ip'], + self.info_per_shelves[shelf]['username'], + self.info_per_shelves[shelf]['password']) def get_environment_name(self): return self.dea_struct['name'] @@ -54,19 +58,29 @@ class DeploymentEnvironmentAdapter(object): blade_ids.append(blade['id']) blades[blade['id']] = blade - def is_controller(self, shelf_id, blade_id): - blade = self.blades[shelf_id][blade_id] - return (True if 'role' in blade and blade['role'] == 'controller' + def has_role(self, role, shelf, blade): + blade = self.blades_per_shelves[shelf][blade] + if role == 'compute': + return True if 'roles' not in blade else False + return (True if 'roles' in blade and role in blade['roles'] else False) - def is_compute_host(self, 
shelf_id, blade_id): - blade = self.blades[shelf_id][blade_id] - return True if 'role' not in blade else False - - def collect_network_info(self): - self.networks = {} - for network in self.dea_struct['network']: - self.networks[network['name']] = network + def collect_network_names(self): + self.network_names = [] + for network in self.dea_struct['networks']['networks']: + self.network_names.append(network['name']) def get_networks(self): - return self.networks \ No newline at end of file + return self.dea_struct['networks'] + + def get_network_names(self): + return self.network_names + + def get_settings(self): + return self.dea_struct['settings'] + + def get_network_scheme(self, node_type): + return self.dea_struct[node_type] + + def get_interfaces(self): + return self.dea_struct['interfaces'] \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/deploy.py b/fuel/deploy/cloud_deploy/cloud/deploy.py new file mode 100644 index 0000000..ea33f8b --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/deploy.py @@ -0,0 +1,208 @@ +import time +import yaml +import io +import os + +import common +from dea import DeploymentEnvironmentAdapter +from configure_environment import ConfigureEnvironment +from deployment import Deployment + +SUPPORTED_RELEASE = 'Juno on CentOS 6.5' + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +parse = common.parse +err = common.err +check_file_exists = common.check_file_exists +LOG = common.LOG + +class Deploy(object): + + def __init__(self, yaml_config_dir): + self.supported_release = None + self.yaml_config_dir = yaml_config_dir + self.macs_per_shelf_dict = {} + self.node_ids_dict = {} + self.node_id_roles_dict = {} + self.env_id = None + self.shelf_blades_dict = {} + + def cleanup_fuel_environments(self, env_list): + WAIT_LOOP = 60 + SLEEP_TIME = 10 + for env in env_list: + LOG.debug('Deleting environment %s\n' % env[E['id']]) + exec_cmd('fuel env --env %s --delete' % env[E['id']]) + 
all_env_erased = False + for i in range(WAIT_LOOP): + env_list = parse(exec_cmd('fuel env list')) + if env_list[0][0]: + time.sleep(SLEEP_TIME) + else: + all_env_erased = True + break + if not all_env_erased: + err('Could not erase these environments %s' + % [(env[E['id']], env[E['status']]) for env in env_list]) + + def cleanup_fuel_nodes(self, node_list): + for node in node_list: + if node[N['status']] == 'discover': + LOG.debug('Deleting node %s\n' % node[N['id']]) + exec_cmd('fuel node --node-id %s --delete-from-db' + % node[N['id']]) + exec_cmd('cobbler system remove --name node-%s' + % node[N['id']]) + + def check_previous_installation(self): + LOG.debug('Check previous installation\n') + env_list = parse(exec_cmd('fuel env list')) + if env_list[0][0]: + self.cleanup_fuel_environments(env_list) + node_list = parse(exec_cmd('fuel node list')) + if node_list[0][0]: + self.cleanup_fuel_nodes(node_list) + + def check_supported_release(self): + LOG.debug('Check supported release: %s\n' % SUPPORTED_RELEASE) + release_list = parse(exec_cmd('fuel release -l')) + for release in release_list: + if release[R['name']] == SUPPORTED_RELEASE: + self.supported_release = release + break + if not self.supported_release: + err('This Fuel does not contain the following ' + 'release: %s\n' % SUPPORTED_RELEASE) + + def check_prerequisites(self): + LOG.debug('Check prerequisites\n') + self.check_supported_release() + self.check_previous_installation() + + def find_mac_in_dict(self, mac): + for shelf, blade_dict in self.macs_per_shelf_dict.iteritems(): + for blade, mac_list in blade_dict.iteritems(): + if mac in mac_list: + return shelf, blade + + def all_blades_discovered(self): + for shelf, blade_dict in self.node_ids_dict.iteritems(): + for blade, node_id in blade_dict.iteritems(): + if not node_id: + return False + return True + + def not_discovered_blades_summary(self): + summary = '' + for shelf, blade_dict in self.node_ids_dict.iteritems(): + for blade, node_id in 
blade_dict.iteritems(): + if not node_id: + summary += '[shelf %s, blade %s]\n' % (shelf, blade) + return summary + + def collect_blade_ids_per_shelves(self, dea): + self.shelf_blades_dict = dea.get_blade_ids_per_shelves() + + def node_discovery(self, node_list, discovered_macs): + for node in node_list: + if (node[N['status']] == 'discover' and + node[N['online']] == 'True' and + node[N['mac']] not in discovered_macs): + discovered_macs.append(node[N['mac']]) + shelf_blade = self.find_mac_in_dict(node[N['mac']]) + if shelf_blade: + self.node_ids_dict[shelf_blade[0]][shelf_blade[1]] = \ + node[N['id']] + + def discovery_waiting_loop(self, discovered_macs): + WAIT_LOOP = 180 + SLEEP_TIME = 10 + all_discovered = False + for i in range(WAIT_LOOP): + node_list = parse(exec_cmd('fuel node list')) + if node_list[0][0]: + self.node_discovery(node_list, discovered_macs) + if self.all_blades_discovered(): + all_discovered = True + break + else: + time.sleep(SLEEP_TIME) + return all_discovered + + def wait_for_discovered_blades(self): + LOG.debug('Wait for discovered blades\n') + discovered_macs = [] + for shelf, blade_list in self.shelf_blades_dict.iteritems(): + self.node_ids_dict[shelf] = {} + for blade in blade_list: + self.node_ids_dict[shelf][blade] = None + all_discovered = self.discovery_waiting_loop(discovered_macs) + if not all_discovered: + err('Not all blades have been discovered: %s\n' + % self.not_discovered_blades_summary()) + + def get_mac_addresses(self, macs_yaml): + with io.open(macs_yaml, 'r') as stream: + self.macs_per_shelf_dict = yaml.load(stream) + + def assign_roles_to_cluster_node_ids(self, dea): + self.node_id_roles_dict = {} + for shelf, blades_dict in self.node_ids_dict.iteritems(): + for blade, node_id in blades_dict.iteritems(): + role_list = [] + if dea.has_role('controller', shelf, blade): + role_list.extend(['controller', 'mongo']) + if dea.has_role('cinder', shelf, blade): + role_list.extend(['cinder']) + elif dea.has_role('compute', shelf, 
blade): + role_list.extend(['compute']) + self.node_id_roles_dict[node_id] = (role_list, shelf, blade) + + def configure_environment(self, dea): + config_env = ConfigureEnvironment(dea, self.yaml_config_dir, + self.supported_release[R['id']], + self.node_id_roles_dict) + config_env.configure_environment() + self.env_id = config_env.env_id + + def deploy(self, dea): + dep = Deployment(dea, self.yaml_config_dir, self.env_id, + self.node_id_roles_dict) + dep.deploy() + + +def main(): + + base_dir = os.path.dirname(os.path.realpath(__file__)) + dea_yaml = base_dir + '/dea.yaml' + check_file_exists(dea_yaml) + macs_yaml = base_dir + '/macs.yaml' + check_file_exists(macs_yaml) + + yaml_config_dir = '/var/lib/opnfv/pre_deploy' + + deploy = Deploy(yaml_config_dir) + dea = DeploymentEnvironmentAdapter() + dea.parse_yaml(dea_yaml) + + deploy.get_mac_addresses(macs_yaml) + + deploy.collect_blade_ids_per_shelves(dea) + + deploy.check_prerequisites() + + deploy.wait_for_discovered_blades() + + deploy.assign_roles_to_cluster_node_ids(dea) + + deploy.configure_environment(dea) + + deploy.deploy(dea) + + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/fuel/deploy/cloud_deploy/cloud/deployment.py b/fuel/deploy/cloud_deploy/cloud/deployment.py new file mode 100644 index 0000000..831059b --- /dev/null +++ b/fuel/deploy/cloud_deploy/cloud/deployment.py @@ -0,0 +1,100 @@ +import common +import os +import shutil +import glob +import yaml +import io +import time + +N = common.N +E = common.E +R = common.R +RO = common.RO +exec_cmd = common.exec_cmd +run_proc = common.run_proc +parse = common.parse +err = common.err +LOG = common.LOG + + +class Deployment(object): + + def __init__(self, dea, yaml_config_dir, env_id, node_id_roles_dict): + self.dea = dea + self.env_name = dea.get_environment_name() + self.yaml_config_dir = yaml_config_dir + self.env_id = env_id + self.node_id_roles_dict = node_id_roles_dict + self.node_id_list = [] + for node_id in 
self.node_id_roles_dict.iterkeys(): + self.node_id_list.append(node_id) + self.node_id_list.sort() + + def download_deployment_info(self): + LOG.debug('Download deployment info for environment %s\n' % self.env_id) + deployment_dir = self.yaml_config_dir + '/deployment_%s' % self.env_id + if os.path.exists(deployment_dir): + shutil.rmtree(deployment_dir) + r, c = exec_cmd('fuel --env %s deployment --default --dir %s' + % (self.env_id, self.yaml_config_dir)) + if c > 0: + err('Error: Could not download deployment info for env %s,' + ' reason: %s\n' % (self.env_id, r)) + + def upload_deployment_info(self): + LOG.debug('Upload deployment info for environment %s\n' % self.env_id) + r, c = exec_cmd('fuel --env %s deployment --upload --dir %s' + % (self.env_id, self.yaml_config_dir)) + if c > 0: + err('Error: Could not upload deployment info for env %s,' + ' reason: %s\n' % (self.env_id, r)) + + def pre_deploy(self): + LOG.debug('Running pre-deploy on environment %s\n' % self.env_name) + self.download_deployment_info() + opnfv = {'opnfv': {}} + + for node_file in glob.glob('%s/deployment_%s/*.yaml' + % (self.yaml_config_dir, self.env_id)): + with io.open(node_file) as stream: + node = yaml.load(stream) + + if 'opnfv' not in node: + node.update(opnfv) + + with io.open(node_file, 'w') as stream: + yaml.dump(node, stream, default_flow_style=False) + self.upload_deployment_info() + + + def deploy(self): + WAIT_LOOP = 180 + SLEEP_TIME = 60 + + self.pre_deploy() + + log_file = 'cloud.log' + + LOG.debug('Starting deployment of environment %s\n' % self.env_name) + run_proc('fuel --env %s deploy-changes | strings | tee %s' + % (self.env_id, log_file)) + + ready = False + for i in range(WAIT_LOOP): + env = parse(exec_cmd('fuel env --env %s' % self.env_id)) + LOG.debug('Environment status: %s\n' % env[0][E['status']]) + r, _ = exec_cmd('tail -2 %s | head -1' % log_file) + if r: + LOG.debug('%s\n' % r) + if env[0][E['status']] == 'operational': + ready = True + break + else: + 
import os
import io
import yaml

from cloud import common
from cloud.dea import DeploymentEnvironmentAdapter
from hardware_adapters.dha import DeploymentHardwareAdapter
from ssh_client import SSHClient

exec_cmd = common.exec_cmd
err = common.err
check_file_exists = common.check_file_exists
LOG = common.LOG


class CloudDeploy(object):
    """Orchestrate a cloud deployment from the jump host.

    Power-cycles the blades through the per-shelf hardware adapters,
    collects their MAC addresses, copies the deployer and its
    configuration to the Fuel master, and runs the deployer there.
    """

    def __init__(self, fuel_ip, fuel_username, fuel_password):
        self.fuel_ip = fuel_ip
        self.fuel_username = fuel_username
        self.fuel_password = fuel_password
        # {shelf_id: [blade_id, ...]}, filled by
        # collect_blade_ids_per_shelves().
        self.shelf_blades_dict = {}
        # Per-shelf MAC info as returned by the adapters, filled by
        # get_mac_addresses().
        self.macs_per_shelf_dict = {}

    def _dha(self, dea, shelf):
        # Build the hardware adapter matching this shelf's type.
        # (Renamed the original local 'type', which shadowed the builtin.)
        shelf_type, mgmt_ip, username, password = dea.get_shelf_info(shelf)
        return DeploymentHardwareAdapter(shelf_type, mgmt_ip, username,
                                         password)

    def copy_to_fuel_master(self, dir_path=None, file_path=None, target='~'):
        """Copy a directory (recursively) or a single file to Fuel master.

        Exactly one of dir_path/file_path should be given.  Bug fix: the
        original referenced 'path' in the LOG call before it was ever
        assigned when neither argument was supplied (UnboundLocalError);
        that case is now a no-op.
        """
        path = None
        if dir_path:
            path = '-r ' + dir_path
        elif file_path:
            path = file_path
        if not path:
            return
        LOG.debug('Copying %s to Fuel Master %s' % (path, target))
        exec_cmd('sshpass -p %s scp -o UserKnownHostsFile=/dev/null'
                 ' -o StrictHostKeyChecking=no -o ConnectTimeout=15'
                 ' %s %s@%s:%s'
                 % (self.fuel_password, path, self.fuel_username,
                    self.fuel_ip, target))

    def run_cloud_deploy(self, deploy_dir, deploy_app):
        """Execute the deployer application on the Fuel master over SSH."""
        LOG.debug('START CLOUD DEPLOYMENT')
        ssh = SSHClient(self.fuel_ip, self.fuel_username, self.fuel_password)
        ssh.open()
        ssh.run('python %s/%s' % (deploy_dir, deploy_app))
        ssh.close()

    def power_off_blades(self, dea):
        """Power off every blade, shelf by shelf."""
        for shelf, blade_list in self.shelf_blades_dict.iteritems():
            self._dha(dea, shelf).power_off_blades(shelf, blade_list)

    def power_on_blades(self, dea):
        """Power on every blade, shelf by shelf."""
        for shelf, blade_list in self.shelf_blades_dict.iteritems():
            self._dha(dea, shelf).power_on_blades(shelf, blade_list)

    def set_boot_order(self, dea):
        """Set the boot order on every blade, shelf by shelf."""
        for shelf, blade_list in self.shelf_blades_dict.iteritems():
            self._dha(dea, shelf).set_boot_order_blades(shelf, blade_list)

    def get_mac_addresses(self, dea, macs_yaml):
        """Collect blade MAC addresses per shelf and dump them to macs_yaml."""
        self.macs_per_shelf_dict = {}
        for shelf, blade_list in self.shelf_blades_dict.iteritems():
            self.macs_per_shelf_dict[shelf] = (
                self._dha(dea, shelf).get_blades_mac_addresses(shelf,
                                                               blade_list))
        with io.open(macs_yaml, 'w') as stream:
            yaml.dump(self.macs_per_shelf_dict, stream,
                      default_flow_style=False)

    def collect_blade_ids_per_shelves(self, dea):
        """Cache the shelf -> blade-id mapping from the DEA."""
        self.shelf_blades_dict = dea.get_blade_ids_per_shelves()


def main():
    fuel_ip = '10.20.0.2'
    fuel_username = 'root'
    fuel_password = 'r00tme'
    deploy_dir = '~/cloud'

    cloud = CloudDeploy(fuel_ip, fuel_username, fuel_password)

    base_dir = os.path.dirname(os.path.realpath(__file__))
    deployment_dir = base_dir + '/cloud'
    macs_yaml = base_dir + '/macs.yaml'
    dea_yaml = base_dir + '/dea.yaml'
    check_file_exists(dea_yaml)

    cloud.copy_to_fuel_master(dir_path=deployment_dir)
    cloud.copy_to_fuel_master(file_path=dea_yaml, target=deploy_dir)

    dea = DeploymentEnvironmentAdapter()
    dea.parse_yaml(dea_yaml)

    cloud.collect_blade_ids_per_shelves(dea)
    cloud.power_off_blades(dea)
    cloud.set_boot_order(dea)
    cloud.power_on_blades(dea)

    cloud.get_mac_addresses(dea, macs_yaml)
    # Bug fix: verify the file that was just generated (macs.yaml); the
    # original re-checked dea.yaml, which had already been validated above.
    check_file_exists(macs_yaml)
    cloud.copy_to_fuel_master(file_path=macs_yaml, target=deploy_dir)

    cloud.run_cloud_deploy(deploy_dir, 'deploy.py')


if __name__ == '__main__':
    main()
def get_blades_mac_addresses(self, shelf, blade_list):
    """Return {blade_id: [left_mac, right_mac]} for the given blades.

    Connects to the shelf's active Onboard Administrator, runs
    'show server info <blade>' for each blade and parses the two
    control MAC addresses from the printout.

    Raises InternalConnectError when the OA connection fails and
    NoInfoFoundError when blade info cannot be retrieved.
    """
    macs_per_blade_dict = {}
    LOG.debug("Getting MAC addresses for shelf %s, blades %s"
              % (shelf, blade_list))
    self.oa_error_message = ''
    oa = RunOACommand(self.mgmt_ip, self.username, self.password)
    LOG.debug("Connect to active OA for shelf %d" % shelf)
    try:
        res = oa.connect_to_active()
    except:
        raise self.InternalConnectError(oa.error_message)
    if res is None:
        raise self.InternalConnectError(oa.error_message)
    if not oa.connected():
        raise self.NoInfoFoundError(oa.error_message)
    try:
        for blade in blade_list:
            # Bug fix: build the command BEFORE logging it; the original
            # logged 'cmd' one line before it was assigned, raising a
            # NameError on the first iteration (which the bare except then
            # masked as NoInfoFoundError).
            cmd = ("show server info %s" % blade)
            LOG.debug("Send command to OA: %s" % cmd)
            printout = oa.send_command(cmd)
            left, right = self.find_mac(printout, shelf, blade)
            left = EUI(left, dialect=self.mac_dhcp)
            right = EUI(right, dialect=self.mac_dhcp)
            macs_per_blade_dict[blade] = [str(left), str(right)]
    except:
        raise self.NoInfoFoundError(oa.error_message)
    finally:
        oa.close()
    return macs_per_blade_dict
self.set_state(shelf, 'locked', one_blade=blade) @@ -127,16 +155,11 @@ class HpAdapter(object): def set_boot_order_blade(self, shelf, blade): return self.set_boot_order(shelf, one_blade=blade) - def set_boot_order_blades(self, shelf, blade_list): - return self.set_boot_order(shelf, blade_list=blade_list) - - - # Search HP's OA server info for MAC for left and right control - def find_mac(self, serverinfo, shelf, blade): + def find_mac(self, printout, shelf, blade): left = False right = False - for line in serverinfo: + for line in printout: if ("No Server Blade Installed" in line or "Invalid Arguments" in line): raise self.NoInfoFoundError("Blade %d in shelf %d " @@ -160,7 +183,6 @@ class HpAdapter(object): # Return True to indicate that power state succesfully updated # state: locked, unlocked def set_state(self, shelf, state, one_blade=None, blade_list=None): - if state not in ['locked', 'unlocked']: return None diff --git a/fuel/deploy/hardware_adapters/hp/run_oa_command.py b/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py similarity index 93% rename from fuel/deploy/hardware_adapters/hp/run_oa_command.py rename to fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py index 32135c3..36fac77 100644 --- a/fuel/deploy/hardware_adapters/hp/run_oa_command.py +++ b/fuel/deploy/cloud_deploy/hardware_adapters/hp/run_oa_command.py @@ -1,12 +1,9 @@ import socket import paramiko -import logging -LOG = logging.getLogger(__name__) -out_hdlr = logging.FileHandler(__file__.split('.')[0] + '.log', mode='w') -out_hdlr.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) -LOG.addHandler(out_hdlr) -LOG.setLevel(logging.DEBUG) +from cloud import common + +LOG = common.LOG class RunOACommand: diff --git a/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py b/fuel/deploy/cloud_deploy/hardware_adapters/libvirt/__init__.py new file mode 100644 index 0000000..c274feb --- /dev/null +++ 
class LibvirtAdapter(object):
    """Hardware adapter that manages shelf/blade VMs through virsh over SSH.

    A 'blade' maps to a libvirt domain named s<shelf>_b<blade> on the
    host at mgmt_ip.
    """

    def __init__(self, mgmt_ip, username, password):
        self.mgmt_ip = mgmt_ip
        self.username = username
        self.password = password
        # Strips ignorable whitespace so re-serialized XML pretty-prints
        # cleanly after editing.
        self.parser = etree.XMLParser(remove_blank_text=True)

    def _vm_name(self, shelf, blade):
        # Canonical libvirt domain name for a blade in a shelf.
        return 's%s_b%s' % (shelf, blade)

    def _define(self, ssh, xml_dump, temp_dir, vm_name):
        # Serialize the edited domain XML and re-register it with virsh.
        # NOTE(review): the XML file is written with local open() while
        # temp_dir was created on the managed host over SSH -- this only
        # works when this code runs on the libvirt host itself; confirm.
        tree = etree.ElementTree(xml_dump)
        xml_file = temp_dir + '/%s.xml' % vm_name
        with open(xml_file, 'w') as f:
            tree.write(f, pretty_print=True, xml_declaration=True)
        ssh.execute('virsh define %s' % xml_file)

    def power_off_blades(self, shelf, blade_list):
        """Force-stop ('virsh destroy') every listed blade VM."""
        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
        ssh.open()
        for blade in blade_list:
            LOG.debug('Power off blade %s in shelf %s' % (blade, shelf))
            resp = ssh.execute('virsh destroy %s'
                               % self._vm_name(shelf, blade))
            LOG.debug('response: %s' % resp)
        ssh.close()

    def power_on_blades(self, shelf, blade_list):
        """Start ('virsh start') every listed blade VM."""
        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
        ssh.open()
        for blade in blade_list:
            LOG.debug('Power on blade %s in shelf %s' % (blade, shelf))
            resp = ssh.execute('virsh start %s'
                               % self._vm_name(shelf, blade))
            LOG.debug('response: %s' % resp)
        ssh.close()

    def set_boot_order_blades(self, shelf, blade_list, boot_dev_list=None):
        """Rewrite each blade domain's <os> boot order.

        Defaults to network boot first, then hard disk, and disables the
        boot menu.
        """
        if not boot_dev_list:
            boot_dev_list = ['network', 'hd']
        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
        ssh.open()
        temp_dir = ssh.execute('mktemp -d').strip()
        for blade in blade_list:
            LOG.debug('Set boot order %s on blade %s in shelf %s'
                      % (boot_dev_list, blade, shelf))
            vm_name = self._vm_name(shelf, blade)
            resp = ssh.execute('virsh dumpxml %s' % vm_name)
            xml_dump = etree.fromstring(resp, self.parser)
            for os_elem in xml_dump.xpath('/domain/os'):
                # Drop any existing boot configuration first.
                for bootelem in ['boot', 'bootmenu']:
                    for old in os_elem.xpath(bootelem):
                        old.getparent().remove(old)
                for dev in boot_dev_list:
                    boot = etree.Element('boot')
                    boot.set('dev', dev)
                    os_elem.append(boot)
                bmenu = etree.Element('bootmenu')
                bmenu.set('enable', 'no')
                os_elem.append(bmenu)
            self._define(ssh, xml_dump, temp_dir, vm_name)
        ssh.execute('rm -fr %s' % temp_dir)
        ssh.close()

    def get_blades_mac_addresses(self, shelf, blade_list):
        """Return {blade: [mac, ...]} read from each domain's interfaces."""
        LOG.debug('Get the MAC addresses of blades %s in shelf %s'
                  % (blade_list, shelf))
        macs_per_blade_dict = {}
        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
        ssh.open()
        for blade in blade_list:
            resp = ssh.execute('virsh dumpxml %s'
                               % self._vm_name(shelf, blade))
            xml_dump = etree.fromstring(resp)
            macs_per_blade_dict[blade] = [
                mac.get('address')
                for interface in xml_dump.xpath('/domain/devices/interface')
                for mac in interface.xpath('mac')]
        ssh.close()
        return macs_per_blade_dict

    def _set_cdrom_source(self, xml_dump, disk_type, image_path=None):
        # Point every cdrom disk at image_path, or detach the media when
        # image_path is None.
        for disk in xml_dump.xpath('/domain/devices/disk'):
            if disk.get('device') == 'cdrom':
                disk.set('type', disk_type)
                for source in disk.xpath('source'):
                    disk.remove(source)
                if image_path is not None:
                    source = etree.SubElement(disk, 'source')
                    source.set('file', image_path)

    def load_image_file(self, shelf=None, blade=None, vm=None,
                        image_path=None):
        """Attach image_path to the cdrom device of a blade VM or named VM.

        Identify the target either by (shelf, blade) or by an explicit
        vm name.
        """
        if shelf and blade:
            vm_name = self._vm_name(shelf, blade)
        else:
            vm_name = vm
        LOG.debug('Load media file %s into %s '
                  % (image_path, 'vm %s' % vm if vm
                     else 'blade %s in shelf %s' % (shelf, blade)))
        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
        ssh.open()
        temp_dir = ssh.execute('mktemp -d').strip()
        resp = ssh.execute('virsh dumpxml %s' % vm_name)
        xml_dump = etree.fromstring(resp)
        self._set_cdrom_source(xml_dump, 'file', image_path)
        self._define(ssh, xml_dump, temp_dir, vm_name)
        ssh.execute('rm -fr %s' % temp_dir)
        ssh.close()

    def eject_image_file(self, shelf=None, blade=None, vm=None):
        """Detach any cdrom media from a blade VM or a named VM."""
        if shelf and blade:
            vm_name = self._vm_name(shelf, blade)
        else:
            vm_name = vm
        # Bug fix: the original format expression lacked parentheses around
        # the conditional, so operator precedence dropped the 'Eject media
        # file from ' prefix whenever vm was None.
        LOG.debug('Eject media file from %s '
                  % ('vm %s' % vm if vm
                     else 'blade %s in shelf %s' % (shelf, blade)))
        ssh = SSHClient(self.mgmt_ip, self.username, self.password)
        ssh.open()
        temp_dir = ssh.execute('mktemp -d').strip()
        resp = ssh.execute('virsh dumpxml %s' % vm_name)
        xml_dump = etree.fromstring(resp)
        self._set_cdrom_source(xml_dump, 'block')
        self._define(ssh, xml_dump, temp_dir, vm_name)
        ssh.execute('rm -fr %s' % temp_dir)
        ssh.close()
timeout=timeout) + + def close(self): + if self.client is not None: + self.client.close() + self.client = None + + def execute(self, command, sudo=False, timeout=TIMEOUT): + if sudo and self.username != 'root': + command = "sudo -S -p '' %s" % command + stdin, stdout, stderr = self.client.exec_command(command, + timeout=timeout) + if sudo: + stdin.write(self.password + '\n') + stdin.flush() + return ''.join(''.join(stderr.readlines()) + + ''.join(stdout.readlines())) + + def run(self, command): + transport = self.client.get_transport() + transport.set_keepalive(1) + chan = transport.open_session() + chan.exec_command(command) + + while not chan.exit_status_ready(): + if chan.recv_ready(): + data = chan.recv(1024) + while data: + print data + data = chan.recv(1024) + + if chan.recv_stderr_ready(): + error_buff = chan.recv_stderr(1024) + while error_buff: + print error_buff + error_buff = chan.recv_stderr(1024) + exit_status = chan.recv_exit_status() + LOG.debug('Exit status %s' % exit_status) \ No newline at end of file diff --git a/fuel/deploy/common.py b/fuel/deploy/common.py deleted file mode 100644 index cd5085c..0000000 --- a/fuel/deploy/common.py +++ /dev/null @@ -1,29 +0,0 @@ -import subprocess -import sys - - -N = {'id': 0, 'status': 1, 'name': 2, 'cluster': 3, 'ip': 4, 'mac': 5, - 'roles': 6, 'pending_roles': 7, 'online': 8} -E = {'id': 0, 'status': 1, 'name': 2, 'mode': 3, 'release_id': 4, - 'changes': 5, 'pending_release_id': 6} -R = {'id': 0, 'name': 1, 'state': 2, 'operating_system': 3, 'version': 4} -RO = {'name': 0, 'conflicts': 1} - -def exec_cmd(cmd): - process = subprocess.Popen(cmd, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True) - return process.communicate()[0] - -def parse(printout): - parsed_list = [] - lines = printout.splitlines() - for l in lines[2:]: - parsed = [e.strip() for e in l.split('|')] - parsed_list.append(parsed) - return parsed_list - -def err(error_message): - sys.stderr.write(error_message) - sys.exit(1) 
diff --git a/fuel/deploy/configure_environment.py b/fuel/deploy/configure_environment.py deleted file mode 100644 index 9aca904..0000000 --- a/fuel/deploy/configure_environment.py +++ /dev/null @@ -1,70 +0,0 @@ -import common -import os -import shutil -import yaml - - -from configure_settings import ConfigureSettings -from configure_network import ConfigureNetwork - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -class ConfigureEnvironment(object): - - def __init__(self, dea, yaml_config_dir): - self.env_id = None - self.dea = dea - self.yaml_config_dir = yaml_config_dir - self.env_name = dea.get_environment_name() - - def env_exists(self, env_name): - env_list = parse(exec_cmd('fuel env --list')) - for env in env_list: - if env[E['name']] == env_name and env[E['status']] == 'new': - return True - return False - - def get_env_id(self, env_name): - env_list = parse(exec_cmd('fuel env --list')) - for env in env_list: - if env[E['name']] == env_name: - return env[E['id']] - - def configure_environment(self, dea): - exec_cmd('fuel env -c --name %s --release %s --mode ha --net neutron ' - '--nst vlan' % (self.env_name, - self.supported_release[R['id']])) - - self.env_id = self.get_env_id(self.env_name) - if not self.env_exists(self.env_name): - err("Failed to create environment %s" % self.env_name) - - self.config_settings() - self.config_network() - - def config_settings(self): - if os.path.exists(self.yaml_config_dir): - shutil.rmtree(self.yaml_config_dir) - os.makedirs(self.yaml_config_dir) - - settings = ConfigureSettings(self.yaml_config_dir, self.env_id) - settings.config_settings() - - - def config_network(self): - network_yaml=self.yaml_config_dir + '/network_%s.yaml' % self.env_id - os.remove(network_yaml) - - network = ConfigureNetwork(self.yaml_config_dir, network_yaml, - self.env_id, self.dea) - network.config_network() - - - - diff --git a/fuel/deploy/configure_network.py 
b/fuel/deploy/configure_network.py deleted file mode 100644 index 0b298e5..0000000 --- a/fuel/deploy/configure_network.py +++ /dev/null @@ -1,91 +0,0 @@ -import common -import os -import yaml -import io -import re - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -P1 = re.compile('!\s.*') - -class ConfigureNetwork(object): - - def __init__(self, yaml_config_dir, network_yaml, env_id, dea): - self.yaml_config_dir = yaml_config_dir - self.network_yaml = network_yaml - self.env_id = env_id - self.dea = dea - - def download_settings(self): - exec_cmd('fuel network --env %s --download --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def upload_settings(self): - exec_cmd('fuel network --env %s --upload --dir %s' - % (self.env_id, self.yaml_config_dir)) - - def config_network(self): - - self.download_settings() - - self.apply_network_config() - - self.upload_settings() - - self.verify() - - def apply_network_config(self): - - with io.open(self.network_yaml) as stream: - network_config = yaml.load(stream) - networks = network_config['networks'] - - net = self.dea.get_networks() - net['fuelweb_admin'] = net['management'] - if 'vlan' in net['fuelweb_admin']: - del net['fuelweb_admin']['vlan'] - del net['management'] - net_names = [n for n in net.iterkeys()] - - for i in range(len(networks)): - if networks[i]['name'] == 'management': - networks = networks[:i] + networks[i+1:] - network_config['networks'] = networks - break - - for network in networks: - name = network['name'] - if name in net_names: - if ('vlan' in net[name] and net[name]['vlan'] is not None): - network['vlan_start'] = net[name]['vlan'] - network['cidr'] = net[name]['cidr'] - network['ip_ranges'][0][0] = net[name]['start'] - network['ip_ranges'][0][1] = net[name]['end'] - - with io.open(self.network_yaml, 'w') as stream: - yaml.dump(network_config, stream, default_flow_style=False) - - def verify(self): - ret = 
exec_cmd('mktemp -d') - temp_dir = ret.splitlines()[0] - - exec_cmd('fuel network --env %s --download --dir %s' - % (self.env_id, temp_dir)) - - ret = exec_cmd('diff -C0 %s %s' - % (self.network_yaml, - temp_dir + '/network_%s.yaml' % self.env_id)) - diff_list = [] - for l in ret.splitlines(): - m = P1.match(l) - if m and '_vip' not in l: - diff_list.append(l) - if diff_list: - err('Uploaded network yaml rejected by Fuel\n') - \ No newline at end of file diff --git a/fuel/deploy/configure_settings.py b/fuel/deploy/configure_settings.py deleted file mode 100644 index cdeea49..0000000 --- a/fuel/deploy/configure_settings.py +++ /dev/null @@ -1,88 +0,0 @@ -import common -import os -import yaml -import io -import re - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -class ConfigureSettings(object): - - def __init__(self, yaml_config_dir, env_id): - self.yaml_config_dir = yaml_config_dir - self.env_id = env_id - - def download_settings(self): - exec_cmd('fuel --env %s settings --download' % self.env_id) - - def upload_settings(self): - exec_cmd('fuel --env %s settings --upload' % self.env_id) - - - def config_settings(self): - self.download_settings() - self.modify_settings() - self.upload_settings() - - # Fix console speed - def fix_console_speed(data): - # First remove all console= from the kernel cmdline - cmdline = data["editable"]["kernel_params"]["kernel"]["value"] - pat = re.compile(r"console=[\w,]+\s+") - repl = 1 - while repl != 0: - cmdline, repl = pat.subn("", cmdline) - - # Then add the console info we want - cmdline = re.sub(r"^", "console=tty0 console=ttyS0,115200 ", cmdline) - data["editable"]["kernel_params"]["kernel"]["value"] = cmdline - - # Initialize kernel audit - def initialize_kernel_audit(data): - cmdline = data["editable"]["kernel_params"]["kernel"]["value"] - cmdline = "audit=1 " + cmdline - data["editable"]["kernel_params"]["kernel"]["value"] = cmdline - - # Add 
crashkernel parameter to boot parameters. W/o this we can't - # make crash dumps after initial deploy. Standard grub setup will add - # crashkernel= options - with bad values but that is another issue - but - # that only enables crash dumps after first reboot - def add_crashkernel_support(data): - cmdline = data["editable"]["kernel_params"]["kernel"]["value"] - cmdline += " crashkernel=256M" - data["editable"]["kernel_params"]["kernel"]["value"] = cmdline - - - def modify_settings(self): - - filename = "%s/settings_%d.yaml" % (self.yaml_config_dir, self.env_id) - if not os.path.isfile(filename): - err("Failed to find %s\n" % filename) - - with io.open(filename) as stream: - data = yaml.load(stream) - - self.fix_console_speed(data) - - self.initialize_kernel_audit(data) - - self.add_crashkernel_support(data) - - # Make sure we have the correct libvirt type - data["editable"]["common"]["libvirt_type"]["value"] = "kvm" - - - # Save the settings into the file from which we loaded them - with io.open(filename, "w") as stream: - yaml.dump(data, stream, default_flow_style=False) - - - - - diff --git a/fuel/deploy/dea.yaml b/fuel/deploy/dea.yaml index 420dae7..b83ddea 100644 --- a/fuel/deploy/dea.yaml +++ b/fuel/deploy/dea.yaml @@ -1,37 +1,947 @@ --- name: ENV-1 -server: - type: hp - mgmt_ip: 10.118.32.197 - username: opnfv - password: E///@work shelf: - id: 1 + type: libvirt + mgmt_ip: 10.20.0.1 + username: user + password: systemabc blade: - id: 1 - role: controller + roles: + - controller - id: 2 + roles: + - controller - id: 3 - role: controller + roles: + - controller - id: 4 - id: 5 - id: 6 -network: - - name: management - cidr: 192.168.0.0/24 - start: 192.168.0.1 - end: 192.168.0.253 - - name: private - vlan: - cidr: 192.168.11.0/24 - start: 192.168.11.1 - end: 192.168.11.253 - - name: storage - vlan: - cidr: 192.168.12.0/24 - start: 192.168.12.1 - end: 192.168.12.253 - - name: public - vlan: +networks: + management_vip: 192.168.0.2 + networking_parameters: + 
base_mac: fa:16:3e:00:00:00 + dns_nameservers: + - 8.8.4.4 + - 8.8.8.8 + floating_ranges: + - - 172.16.0.130 + - 172.16.0.254 + gre_id_range: + - 2 + - 65535 + internal_cidr: 192.168.111.0/24 + internal_gateway: 192.168.111.1 + net_l23_provider: ovs + segmentation_type: vlan + vlan_range: + - 1000 + - 1200 + networks: + - cidr: 172.16.0.0/24 + gateway: 172.16.0.1 + ip_ranges: + - - 172.16.0.2 + - 172.16.0.126 + meta: + assign_vip: true + cidr: 172.16.0.0/24 + configurable: true + floating_range_var: floating_ranges + ip_range: + - 172.16.0.2 + - 172.16.0.126 + map_priority: 1 + name: public + notation: ip_ranges + render_addr_mask: public + render_type: null + use_gateway: true + vlan_start: null + name: public + vlan_start: null + - cidr: null + gateway: null + ip_ranges: [] + meta: + assign_vip: false + configurable: false + map_priority: 2 + name: private + neutron_vlan_range: true + notation: null + render_addr_mask: null + render_type: null + seg_type: vlan + use_gateway: false + vlan_start: null + name: private + vlan_start: null + - cidr: 192.168.0.0/24 + gateway: null + ip_ranges: + - - 192.168.0.2 + - 192.168.0.254 + meta: + assign_vip: true + cidr: 192.168.0.0/24 + configurable: true + map_priority: 2 + name: management + notation: cidr + render_addr_mask: internal + render_type: cidr + use_gateway: false + vlan_start: 101 + name: management + vlan_start: 101 + - cidr: 192.168.1.0/24 + gateway: null + ip_ranges: + - - 192.168.1.2 + - 192.168.1.254 + meta: + assign_vip: false + cidr: 192.168.1.0/24 + configurable: true + map_priority: 2 + name: storage + notation: cidr + render_addr_mask: storage + render_type: cidr + use_gateway: false + vlan_start: 102 + name: storage + vlan_start: 102 + - cidr: 10.20.0.0/24 + gateway: null + ip_ranges: + - - 10.20.0.3 + - 10.20.0.254 + meta: + assign_vip: false + configurable: false + map_priority: 0 + notation: ip_ranges + render_addr_mask: null + render_type: null + unmovable: true + use_gateway: true + name: 
fuelweb_admin + vlan_start: null + public_vip: 172.16.0.2 +controller: +- action: add-br + name: br-eth0 +- action: add-port + bridge: br-eth0 + name: eth0 +- action: add-br + name: br-eth1 +- action: add-port + bridge: br-eth1 + name: eth1 +- action: add-br + name: br-eth2 +- action: add-port + bridge: br-eth2 + name: eth2 +- action: add-br + name: br-eth3 +- action: add-port + bridge: br-eth3 + name: eth3 +- action: add-br + name: br-ex +- action: add-br + name: br-mgmt +- action: add-br + name: br-storage +- action: add-br + name: br-fw-admin +- action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 +- action: add-patch + bridges: + - br-eth3 + - br-ex + trunks: + - 0 +- action: add-br + name: br-prv +- action: add-patch + bridges: + - br-eth2 + - br-prv +compute: +- action: add-br + name: br-eth0 +- action: add-port + bridge: br-eth0 + name: eth0 +- action: add-br + name: br-eth1 +- action: add-port + bridge: br-eth1 + name: eth1 +- action: add-br + name: br-eth2 +- action: add-port + bridge: br-eth2 + name: eth2 +- action: add-br + name: br-eth3 +- action: add-port + bridge: br-eth3 + name: eth3 +- action: add-br + name: br-mgmt +- action: add-br + name: br-storage +- action: add-br + name: br-fw-admin +- action: add-patch + bridges: + - br-eth1 + - br-storage + tags: + - 102 + - 0 + vlan_ids: + - 102 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-mgmt + tags: + - 101 + - 0 + vlan_ids: + - 101 + - 0 +- action: add-patch + bridges: + - br-eth0 + - br-fw-admin + trunks: + - 0 +- action: add-br + name: br-prv +- action: add-patch + bridges: + - br-eth2 + - br-prv +interfaces: + eth0: + - fuelweb_admin + - management + eth1: + - storage + eth2: + - private + eth3: + - public +settings: + editable: + access: + email: + 
description: Email address for Administrator + label: email + type: text + value: admin@localhost + weight: 40 + metadata: + label: Access + weight: 10 + password: + description: Password for Administrator + label: password + type: password + value: admin + weight: 20 + tenant: + description: Tenant (project) name for Administrator + label: tenant + regex: + error: Invalid tenant name + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 30 + user: + description: Username for Administrator + label: username + regex: + error: Invalid username + source: ^(?!services$)(?!nova$)(?!glance$)(?!keystone$)(?!neutron$)(?!cinder$)(?!swift$)(?!ceph$)(?![Gg]uest$).* + type: text + value: admin + weight: 10 + additional_components: + ceilometer: + description: If selected, Ceilometer component will be installed + label: Install Ceilometer + type: checkbox + value: true + weight: 40 + heat: + description: '' + label: '' + type: hidden + value: true + weight: 30 + metadata: + label: Additional Components + weight: 20 + murano: + description: If selected, Murano component will be installed + label: Install Murano + restrictions: + - cluster:net_provider != 'neutron' + type: checkbox + value: false + weight: 20 + sahara: + description: If selected, Sahara component will be installed + label: Install Sahara + type: checkbox + value: false + weight: 10 + common: + auth_key: + description: Public key(s) to include in authorized_keys on deployed nodes + label: Public Key + type: text + value: '' + weight: 70 + auto_assign_floating_ip: + description: If selected, OpenStack will automatically assign a floating IP + to a new instance + label: Auto assign floating IP + restrictions: + - cluster:net_provider == 'neutron' + type: checkbox + value: false + weight: 40 + compute_scheduler_driver: + label: Scheduler driver + type: radio + value: nova.scheduler.filter_scheduler.FilterScheduler + 
values: + - data: nova.scheduler.filter_scheduler.FilterScheduler + description: Currently the most advanced OpenStack scheduler. See the OpenStack + documentation for details. + label: Filter scheduler + - data: nova.scheduler.simple.SimpleScheduler + description: This is 'naive' scheduler which tries to find the least loaded + host + label: Simple scheduler + weight: 40 + debug: + description: Debug logging mode provides more information, but requires more + disk space. + label: OpenStack debug logging + type: checkbox + value: false + weight: 20 + disable_offload: + description: If set, generic segmentation offload (gso) and generic receive + offload (gro) on physical nics will be disabled. See ethtool man. + label: Disable generic offload on physical nics + restrictions: + - action: hide + condition: cluster:net_provider == 'neutron' and networking_parameters:segmentation_type + == 'gre' + type: checkbox + value: true + weight: 80 + libvirt_type: + label: Hypervisor type + type: radio + value: kvm + values: + - data: kvm + description: Choose this type of hypervisor if you run OpenStack on hardware + label: KVM + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: qemu + description: Choose this type of hypervisor if you run OpenStack on virtual + hosts. + label: QEMU + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + - data: vcenter + description: Choose this type of hypervisor if you run OpenStack in a vCenter + environment. + label: vCenter + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or cluster:net_provider + == 'neutron' + weight: 30 + metadata: + label: Common + weight: 30 + nova_quota: + description: Quotas are used to limit CPU and memory usage for tenants. Enabling + quotas will increase load on the Nova database. 
+ label: Nova quotas + type: checkbox + value: false + weight: 25 + resume_guests_state_on_host_boot: + description: Whether to resume previous guests state when the host reboots. + If enabled, this option causes guests assigned to the host to resume their + previous state. If the guest was running a restart will be attempted when + nova-compute starts. If the guest was not running previously, a restart + will not be attempted. + label: Resume guests state on host boot + type: checkbox + value: false + weight: 60 + use_cow_images: + description: For most cases you will want qcow format. If it's disabled, raw + image format will be used to run VMs. OpenStack with raw format currently + does not support snapshotting. + label: Use qcow format for images + type: checkbox + value: true + weight: 50 + corosync: + group: + description: '' + label: Group + type: text + value: 226.94.1.1 + weight: 10 + metadata: + label: Corosync + restrictions: + - action: hide + condition: 'true' + weight: 50 + port: + description: '' + label: Port + type: text + value: '12000' + weight: 20 + verified: + description: Set True only if multicast is configured correctly on router. + label: Need to pass network verification. 
+ type: checkbox + value: false + weight: 10 + external_dns: + dns_list: + description: List of upstream DNS servers, separated by comma + label: DNS list + type: text + value: 8.8.8.8, 8.8.4.4 + weight: 10 + metadata: + label: Upstream DNS + weight: 90 + external_ntp: + metadata: + label: Upstream NTP + weight: 100 + ntp_list: + description: List of upstream NTP servers, separated by comma + label: NTP servers list + type: text + value: 0.pool.ntp.org, 1.pool.ntp.org + weight: 10 + kernel_params: + kernel: + description: Default kernel parameters + label: Initial parameters + type: text + value: console=ttyS0,9600 console=tty0 rootdelay=90 nomodeset + weight: 45 + metadata: + label: Kernel parameters + weight: 40 + neutron_mellanox: + metadata: + enabled: true + label: Mellanox Neutron components + toggleable: false + weight: 50 + plugin: + label: Mellanox drivers and SR-IOV plugin + type: radio + value: disabled + values: + - data: disabled + description: If selected, Mellanox drivers, Neutron and Cinder plugin will + not be installed. + label: Mellanox drivers and plugins disabled + restrictions: + - settings:storage.iser.value == true + - data: drivers_only + description: If selected, Mellanox Ethernet drivers will be installed to + support networking over Mellanox NIC. Mellanox Neutron plugin will not + be installed. + label: Install only Mellanox drivers + restrictions: + - settings:common.libvirt_type.value != 'kvm' + - data: ethernet + description: If selected, both Mellanox Ethernet drivers and Mellanox network + acceleration (Neutron) plugin will be installed. + label: Install Mellanox drivers and SR-IOV plugin + restrictions: + - settings:common.libvirt_type.value != 'kvm' or not (cluster:net_provider + == 'neutron' and networking_parameters:segmentation_type == 'vlan') + weight: 60 + vf_num: + description: Note that one virtual function will be reserved to the storage + network, in case of choosing iSER. 
+ label: Number of virtual NICs + restrictions: + - settings:neutron_mellanox.plugin.value != 'ethernet' + type: text + value: '16' + weight: 70 + nsx_plugin: + connector_type: + description: Default network transport type to use + label: NSX connector type + type: select + value: stt + values: + - data: gre + label: GRE + - data: ipsec_gre + label: GRE over IPSec + - data: stt + label: STT + - data: ipsec_stt + label: STT over IPSec + - data: bridge + label: Bridge + weight: 80 + l3_gw_service_uuid: + description: UUID for the default L3 gateway service to use with this cluster + label: L3 service UUID + regex: + error: Invalid L3 gateway service UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 50 + metadata: + enabled: false + label: VMware NSX + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' or networking_parameters:net_l23_provider + != 'nsx' + weight: 20 + nsx_controllers: + description: One or more IPv4[:port] addresses of NSX controller node, separated + by comma (e.g. 
10.30.30.2,192.168.110.254:443) + label: NSX controller endpoint + regex: + error: Invalid controller endpoints, specify valid IPv4[:port] pair + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(,(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?)*$ + type: text + value: '' + weight: 60 + nsx_password: + description: Password for Administrator + label: NSX password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + nsx_username: + description: NSX administrator's username + label: NSX username + regex: + error: Empty username + source: \S + type: text + value: admin + weight: 20 + packages_url: + description: URL to NSX specific packages + label: URL to NSX bits + regex: + error: Invalid URL, specify valid HTTP/HTTPS URL with IPv4 address (e.g. + http://10.20.0.2/nsx) + source: ^https?://(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])(:(6553[0-5]|655[0-2][\d]|65[0-4][\d]{2}|6[0-4][\d]{3}|5[\d]{4}|[\d][\d]{0,3}))?(/.*)?$ + type: text + value: '' + weight: 70 + replication_mode: + description: '' + label: NSX cluster has Service nodes + type: checkbox + value: true + weight: 90 + transport_zone_uuid: + description: UUID of the pre-existing default NSX Transport zone + label: Transport zone UUID + regex: + error: Invalid transport zone UUID + source: '[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}' + type: text + value: '' + weight: 40 + provision: + metadata: + label: Provision + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 80 + method: + description: Which provision method to use for this cluster. 
+ label: Provision method + type: radio + value: cobbler + values: + - data: image + description: Copying pre-built images on a disk. + label: Image + - data: cobbler + description: Install from scratch using anaconda or debian-installer. + label: Classic (use anaconda or debian-installer) + public_network_assignment: + assign_to_all_nodes: + description: When disabled, public network will be assigned to controllers + and zabbix-server only + label: Assign public network to all nodes + type: checkbox + value: false + weight: 10 + metadata: + label: Public network assignment + restrictions: + - action: hide + condition: cluster:net_provider != 'neutron' + weight: 50 + storage: + ephemeral_ceph: + description: Configures Nova to store ephemeral volumes in RBD. This works + best if Ceph is enabled for volumes and images, too. Enables live migration + of all types of Ceph backed VMs (without this option, live migration will + only work with VMs launched from Cinder volumes). + label: Ceph RBD for ephemeral volumes (Nova) + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: checkbox + value: false + weight: 75 + images_ceph: + description: Configures Glance to use the Ceph RBD backend to store images. + If enabled, this option will prevent Swift from installing. + label: Ceph RBD for images (Glance) + type: checkbox + value: false + weight: 30 + images_vcenter: + description: Configures Glance to use the vCenter/ESXi backend to store images. + If enabled, this option will prevent Swift from installing. + label: VMWare vCenter/ESXi datastore for images (Glance) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' + type: checkbox + value: false + weight: 35 + iser: + description: 'High performance block storage: Cinder volumes over iSER protocol + (iSCSI over RDMA). This feature requires SR-IOV capabilities in the NIC, + and will use a dedicated virtual function for the storage network.' 
+ label: iSER protocol for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value != true or settings:common.libvirt_type.value + != 'kvm' + type: checkbox + value: false + weight: 11 + metadata: + label: Storage + weight: 60 + objects_ceph: + description: Configures RadosGW front end for Ceph RBD. This exposes S3 and + Swift API Interfaces. If enabled, this option will prevent Swift from installing. + label: Ceph RadosGW for objects (Swift API) + restrictions: + - settings:storage.images_ceph.value == false + type: checkbox + value: false + weight: 80 + osd_pool_size: + description: Configures the default number of object replicas in Ceph. This + number must be equal to or lower than the number of deployed 'Storage - + Ceph OSD' nodes. + label: Ceph object replication factor + regex: + error: Invalid number + source: ^[1-9]\d*$ + restrictions: + - settings:common.libvirt_type.value == 'vcenter' + type: text + value: '2' + weight: 85 + vc_datacenter: + description: Inventory path to a datacenter. If you want to use ESXi host + as datastore, it should be "ha-datacenter". + label: Datacenter name + regex: + error: Empty datacenter + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 65 + vc_datastore: + description: Datastore associated with the datacenter. 
+ label: Datastore name + regex: + error: Empty datastore + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 60 + vc_host: + description: IP Address of vCenter/ESXi + label: vCenter/ESXi IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 45 + vc_image_dir: + description: The name of the directory where the glance images will be stored + in the VMware datastore. + label: Datastore Images directory + regex: + error: Empty images directory + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: /openstack_glance + weight: 70 + vc_password: + description: vCenter/ESXi admin password + label: Password + regex: + error: Empty password + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: password + value: '' + weight: 55 + vc_user: + description: vCenter/ESXi admin username + label: Username + regex: + error: Empty username + source: \S + restrictions: + - action: hide + condition: settings:storage.images_vcenter.value == false or settings:common.libvirt_type.value + != 'vcenter' + type: text + value: '' + weight: 50 + volumes_ceph: + description: Configures Cinder to store volumes in Ceph RBD images. 
+ label: Ceph RBD for volumes (Cinder) + restrictions: + - settings:storage.volumes_lvm.value == true or settings:common.libvirt_type.value + == 'vcenter' + type: checkbox + value: false + weight: 20 + volumes_lvm: + description: Requires at least one Storage - Cinder LVM node. + label: Cinder LVM over iSCSI for volumes + restrictions: + - settings:storage.volumes_ceph.value == true + type: checkbox + value: false + weight: 10 + volumes_vmdk: + description: Configures Cinder to store volumes via VMware vCenter. + label: VMware vCenter for volumes (Cinder) + restrictions: + - settings:common.libvirt_type.value != 'vcenter' or settings:storage.volumes_lvm.value + == true + type: checkbox + value: false + weight: 15 + syslog: + metadata: + label: Syslog + weight: 50 + syslog_port: + description: Remote syslog port + label: Port + regex: + error: Invalid Syslog port + source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: text + value: '514' + weight: 20 + syslog_server: + description: Remote syslog hostname + label: Hostname + type: text + value: '' + weight: 10 + syslog_transport: + label: Syslog transport protocol + type: radio + value: tcp + values: + - data: udp + description: '' + label: UDP + - data: tcp + description: '' + label: TCP + weight: 30 + vcenter: + cluster: + description: vCenter cluster name. If you have multiple clusters, use comma + to separate names + label: Cluster + regex: + error: Invalid cluster list + source: ^([^,\ ]+([\ ]*[^,\ ])*)(,[^,\ ]+([\ ]*[^,\ ])*)*$ + type: text + value: '' + weight: 40 + datastore_regex: + description: The Datastore regexp setting specifies the data stores to use + with Compute. For example, "nas.*". 
If you want to use all available datastores, + leave this field blank + label: Datastore regexp + regex: + error: Invalid datastore regexp + source: ^(\S.*\S|\S|)$ + type: text + value: '' + weight: 50 + host_ip: + description: IP Address of vCenter + label: vCenter IP + regex: + error: Specify valid IPv4 address + source: ^(([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])\.){3}([\d]|[1-9][\d]|1[\d]{2}|2[0-4][\d]|25[0-5])$ + type: text + value: '' + weight: 10 + metadata: + label: vCenter + restrictions: + - action: hide + condition: settings:common.libvirt_type.value != 'vcenter' + weight: 20 + use_vcenter: + description: '' + label: '' + type: hidden + value: true + weight: 5 + vc_password: + description: vCenter admin password + label: Password + regex: + error: Empty password + source: \S + type: password + value: '' + weight: 30 + vc_user: + description: vCenter admin username + label: Username + regex: + error: Empty username + source: \S + type: text + value: '' + weight: 20 + vlan_interface: + description: Physical ESXi host ethernet adapter for VLAN networking (e.g. + vmnic1). If empty "vmnic0" is used by default + label: ESXi VLAN interface + restrictions: + - action: hide + condition: cluster:net_provider != 'nova_network' or networking_parameters:net_manager + != 'VlanManager' + type: text + value: '' + weight: 60 + zabbix: + metadata: + label: Zabbix Access + restrictions: + - action: hide + condition: not ('experimental' in version:feature_groups) + weight: 70 + password: + description: Password for Zabbix Administrator + label: password + type: password + value: zabbix + weight: 20 + username: + description: Username for Zabbix Administrator + label: username + type: text + value: admin + weight: 10 ... 
diff --git a/fuel/deploy/deploy.py b/fuel/deploy/deploy.py deleted file mode 100644 index 4037c1d..0000000 --- a/fuel/deploy/deploy.py +++ /dev/null @@ -1,212 +0,0 @@ -import time -import os -import sys - -import common -from dha import DeploymentHardwareAdapter -from dea import DeploymentEnvironmentAdapter -from configure_environment import ConfigureEnvironment - - -SUPPORTED_RELEASE = 'Juno on CentOS 6.5' - -N = common.N -E = common.E -R = common.R -RO = common.RO -exec_cmd = common.exec_cmd -parse = common.parse -err = common.err - -class Deploy(object): - - def __init__(self, yaml_config_dir): - self.supported_release = None - self.yaml_config_dir = yaml_config_dir - - def get_id_list(self, list): - return [l[0] for l in list] - - def cleanup_fuel_environments(self, env_list): - WAIT_LOOP = 10 - SLEEP_TIME = 2 - id_list = self.get_id_list(env_list) - for id in id_list: - exec_cmd('fuel env --env %s --delete' % id) - for i in range(WAIT_LOOP): - if id in self.get_id_list(parse(exec_cmd('fuel env list'))): - time.sleep(SLEEP_TIME) - else: - continue - - def cleanup_fuel_nodes(self, node_list): - for node in node_list: - if node[N['status']] == 'discover': - exec_cmd('fuel node --node-id %s --delete-from-db' - % node[N['id']]) - exec_cmd('dockerctl shell cobbler cobbler system remove ' - '--name node-%s' % node[N['id']]) - - def check_previous_installation(self): - env_list = parse(exec_cmd('fuel env list')) - if env_list: - self.cleanup_fuel_environments(env_list) - node_list = parse(exec_cmd('fuel node list')) - if node_list: - self.cleanup_fuel_nodes(node_list) - - def check_supported_release(self): - release_list= parse(exec_cmd('fuel release -l')) - for release in release_list: - if release[R['name']] == SUPPORTED_RELEASE: - self.supported_release = release - break - if not self.supported_release: - err("This Fuel doesn't contain the following " - "release: %s\n" % SUPPORTED_RELEASE) - - def check_role_definitions(self): - role_list= parse(exec_cmd('fuel role 
--release %s' - % self.supported_release[R['id']])) - roles = [role[RO['name']] for role in role_list] - if 'compute' not in roles: - err("Role compute does not exist in release %" - % self.supported_release[R['name']]) - if 'controller' not in roles: - err("Role controller does not exist in release %" - % self.supported_release[R['name']]) - - def check_prerequisites(self): - self.check_supported_release() - self.check_role_definitions() - self.check_previous_installation() - - def power_off_blades(self, dha, shelf_blades_dict): - for shelf, blade_list in shelf_blades_dict.iteritems(): - dha.power_off_blades(shelf, blade_list) - - def power_on_blades(self, dha, shelf_blades_dict): - for shelf, blade_list in shelf_blades_dict.iteritems(): - dha.power_on_blades(shelf, blade_list) - - def set_boot_order(self, dha, shelf_blades_dict): - for shelf, blade_list in shelf_blades_dict.iteritems(): - dha.set_boot_order_blades(shelf, blade_list) - - def count_discovered_nodes(self, node_list): - discovered_nodes = 0 - for node in node_list: - if node[N['status']] == 'discover': - discovered_nodes += 1 - return discovered_nodes - - def wait_for_discovered_blades(self, no_of_blades): - WAIT_LOOP = 10 - SLEEP_TIME = 2 - all_discovered = False - node_list = parse(exec_cmd('fuel node list')) - for i in range(WAIT_LOOP): - if (self.count_discovered_nodes(node_list) < no_of_blades): - time.sleep(SLEEP_TIME) - node_list = parse(exec_cmd('fuel node list')) - else: - all_discovered = True - break - if not all_discovered: - err("There are %s blades defined, but not all of " - "them have been discovered\n" % no_of_blades) - - def assign_cluster_node_ids(self, dha, dea, controllers, compute_hosts): - node_list= parse(exec_cmd('fuel node list')) - for shelf_id in dea.get_shelf_ids(): - for blade_id in dea.get_blade_ids_per_shelf(shelf_id): - blade_mac_list = dha.get_blade_mac_addresses( - shelf_id, blade_id) - - found = False - for node in node_list: - if (node[N['mac']] in blade_mac_list 
and - node[N['status']] == 'discover'): - found = True - break - if found: - if dea.is_controller(shelf_id, blade_id): - controllers.append(node[N['id']]) - if dea.is_compute_host(shelf_id, blade_id): - compute_hosts.append(node[N['id']]) - else: - err("Could not find the Node ID for blade " - "with MACs %s or blade is not in " - "discover status\n" % blade_mac_list) - - - def configure_environment(self, dea): - config_env = ConfigureEnvironment(dea, self.yaml_config_dir) - - - - def provision(self): - - - - def fix_power_address(self): - - - - - def deploy(self): - - if id in self.get_id_list(parse(exec_cmd('fuel env list'))): - - self.fix_power_address() - - - - -def main(): - - yaml_path = exec_cmd('pwd').strip() + '/dea.yaml' - yaml_config_dir = '/var/lib/opnfv/pre_deploy' - - deploy = Deploy(yaml_config_dir) - - dea = DeploymentEnvironmentAdapter() - - if not os.path.isfile(yaml_path): - sys.stderr.write("ERROR: File %s not found\n" % yaml_path) - sys.exit(1) - - dea.parse_yaml(yaml_path) - - server_type, mgmt_ip, username, password = dea.get_server_info() - shelf_blades_dict = dea.get_blade_ids_per_shelves() - - dha = DeploymentHardwareAdapter(server_type, mgmt_ip, username, password) - - deploy.check_prerequisites() - - deploy.power_off_blades(dha, shelf_blades_dict) - - deploy.set_boot_order(dha, shelf_blades_dict) - - deploy.power_on_blades(dha, shelf_blades_dict) - - macs = dha.get_blade_mac_addresses() - - deploy.wait_for_discovered_blades(dea.get_no_of_blades()) - - - controllers = [] - compute_hosts = [] - deploy.assign_cluster_node_ids(dha, dea, controllers, compute_hosts) - - - - deploy.configure_environment(dea) - - deploy.deploy(dea) - - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/fuel/deploy/deploy.sh b/fuel/deploy/deploy.sh new file mode 100755 index 0000000..916125e --- /dev/null +++ b/fuel/deploy/deploy.sh @@ -0,0 +1,107 @@ +#!/bin/bash 
+##############################################################################
+# Copyright (c) 2015 Ericsson AB and others.
+# stefan.k.berg@ericsson.com
+# jonas.bjurel@ericsson.com
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Setup locations
+topdir=$(cd `dirname $0`; pwd)
+functions=${topdir}/functions
+tmpdir=$HOME/fueltmp
+deployiso=${tmpdir}/deploy.iso
+cloud_deploy=$(cd ${topdir}/cloud_deploy; pwd)
+
+# Define common functions
+. ${functions}/common.sh
+
+exit_handler() {
+  # Remove safety catch
+  kill -9 `ps -p $killpid -o pid --no-headers` \
+	 `ps --ppid $killpid -o pid --no-headers`\
+	 > /dev/null 2>&1
+}
+
+# Set maximum allowed deploy time (default three hours)
+MAXDEPLOYTIME=${MAXDEPLOYTIME-3h}
+
+####### MAIN ########
+
+if [ "`whoami`" != "root" ]; then
+  error_exit "You need be root to run this script"
+fi
+
+if [ $# -eq 0 -o $# -gt 2 ]; then
+  error_exit "Argument error"
+fi
+
+# Setup tmpdir
+if [ -d $tmpdir ]; then
+  rm -Rf $tmpdir || error_exit "Could not remove tmpdir $tmpdir"
+fi
+
+mkdir $tmpdir || error_exit "Could not create tmpdir $tmpdir"
+
+if [ ! -f $1 ]; then
+  error_exit "Could not find ISO file $1"
+else
+  isofile=$(cd `dirname $1`; echo `pwd`/`basename $1`)
+fi
+
+# If no DEA specified, use the example one
+if [ $# -eq 1 ]; then
+  deafile=${topdir}/dea.yaml
+else
+  deafile=$(cd `dirname $2`; echo `pwd`/`basename $2`)
+fi
+cp ${deafile} ${cloud_deploy}/
+
+if [ ! -f $deafile ]; then
+  error_exit "Could not find DEA file $deafile"
+fi
+
+# Enable safety catch
+echo "Enabling auto-kill if deployment exceeds $MAXDEPLOYTIME"
+(sleep $MAXDEPLOYTIME; echo "Auto-kill of deploy after a timeout of $MAXDEPLOYTIME"; kill $$) &
+killpid=$!
+ +# Enable exit handler +trap exit_handler exit + +# Stop all VMs +for node in `ls libvirt/vms` +do + virsh destroy $node >/dev/null 2>&1 +done + + +# Install the Fuel master +# (Convert to functions at later stage) +echo "Patching iso file" +${functions}/patch-iso.sh $isofile $deployiso $tmpdir || error_exit "Failed to patch ISO" +# Swap isofiles from now on +isofile=$deployiso +. ${functions}/install_iso.sh + +python ${cloud_deploy}/cloud_deploy.py + +echo "Waiting for five minutes for deploy to stabilize" +sleep 5m + +echo "Verifying node status after deployment" +# Any node with non-ready status? +ssh root@10.20.0.2 fuel node 2>/dev/null | tail -n +3 | cut -d "|" -f 2 | \ + sed 's/ //g' | grep -v ready | wc -l | grep -q "^0$" +if [ $? -ne 0 ]; then + echo "Deploy failed to verify" + ssh root@10.20.0.2 fuel node 2>/dev/null + error_exit "Exiting with error status" +else + ssh root@10.20.0.2 fuel node 2>/dev/null + echo "Deployment verified" +fi + diff --git a/fuel/deploy/deploy_fuel.sh b/fuel/deploy/deploy_fuel.sh deleted file mode 100755 index 8cb72b7..0000000 --- a/fuel/deploy/deploy_fuel.sh +++ /dev/null @@ -1,106 +0,0 @@ -#!/bin/bash -# Deploy in deployFuel has the "configure host-network, -# install fuel, configure vm and start it" meaning -set -o xtrace -set -o errexit -set -o nounset -set -o pipefail - -if [ $# -ne 2 ]; then - echo "Usage: $0 " - exit 1 -fi - -readonly iso_file=$1 -readonly interface=$2 -readonly vm_name="fuel_opnfv" -readonly ssh_fuel_vm="sshpass -p r00tme - ssh -o UserKnownHostsFile=/dev/null - -o StrictHostKeyChecking=no - -q - root@192.168.0.11" -readonly RUN_INSTALL="${RUN_INSTALL:-false}" -readonly DEV="${DEV:-false}" - -# poll is not real timeout, commands can take some undefined time to execute -# it is a count of how many times to try while sleeping shortly -# in between checks -readonly poll_virtinstall=1800 -readonly poll_fuel_startup=1200 -readonly poll_deployment=2150 -readonly 
fuel_logfile="/var/log/puppet/bootstrap_admin_node.log" - -cat >$interface.xml < - $interface - - - - -EOF - -cleanup_previous_run() { - echo "Cleaning up previous run" - set +eu - virsh net-destroy $interface > /dev/null 2>&1 - virsh net-undefine $interface > /dev/null 2>&1 - virsh destroy $vm_name > /dev/null 2>&1 - virsh undefine $vm_name > /dev/null 2>&1 - set -eu -} - -create_disk_and_install() { - rm -rf $vm_name.qcow2 - qemu-img create -f qcow2 -o preallocation=metadata $vm_name.qcow2 60G - virt-install --connect=qemu:///system \ - --name=$vm_name \ - --network=network:$interface \ - --ram 2048 --vcpus=4,cores=2 --check-cpu --hvm \ - --disk path=$vm_name.qcow2,format=qcow2,device=disk,bus=virtio \ - --noautoconsole --vnc \ - --cdrom $iso_file -} - -wait_for_virtinstall() { - # Workaround for virt-install --wait which restarts vm - # too fast too attach disk - echo "Waiting for virt-install to finish..." - set +eu - stopped=false - for i in $(seq 0 $poll_virtinstall); do - virsh_out=`virsh list | grep "$vm_name"` - if [ -z "$virsh_out" ]; then - stopped=true - break - fi - sleep 2 - done - set -eu -} - -wait_for_fuel_startup() { - echo "Wait for fuel to start up..." - for i in $(seq 0 $poll_fuel_startup); do - sleep 2 && echo -n "$i " - $ssh_fuel_vm grep complete $fuel_logfile && - echo "Fuel bootstrap is done, deployment should have started now" && - return 0 - done - return 1 -} - - -cleanup_previous_run -virsh net-define $interface.xml -virsh net-start $interface -create_disk_and_install -wait_for_virtinstall - -echo "Starting $vm_name after installation in 6s..." && sleep 6s -set +eu - -virsh start $vm_name -if ! 
wait_for_fuel_startup; then - echo "Fuel failed to start up" - exit 1 -fi diff --git a/fuel/deploy/functions/common.sh b/fuel/deploy/functions/common.sh new file mode 100755 index 0000000..f6cceb4 --- /dev/null +++ b/fuel/deploy/functions/common.sh @@ -0,0 +1,109 @@ +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# stefan.k.berg@ericsson.com +# jonas.bjurel@ericsson.com +# All rights reserved. This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Common functions + +error_exit () { + echo "Error: $@" >&2 + exit 1 +} + +ssh() { + SSHPASS="r00tme" sshpass -e ssh -o UserKnownHostsFile=/dev/null \ + -o StrictHostKeyChecking=no -o ConnectTimeout=15 "$@" +} + +scp() { + SSHPASS="r00tme" sshpass -e scp -o UserKnownHostsFile=/dev/null \ + -o StrictHostKeyChecking=no -o ConnectTimeout=15 "$@" +} + +noNodesUp () { + fuel node | grep True | wc -l +} + +fuel () { + ssh root@10.20.0.2 "fuel $@" +} + +# Return MAC id for virsh node +getNodeId() { + virsh dumpxml $1 | grep "mac address" | head -1 | sed "s/.*'..:..:..:..:\(.*\)'.*/\1/" +} + +# Wait for node with virtid name to come up +waitForHost() { + mac=`getNodeId $1` + + while true + do + fuel node --node-id $mac 2>/dev/null | grep -q True && break + sleep 3 + echo -n "." + done + echo -e "\n" +} + +# Currently not used! 
+# Wait for node count to increase +waitForNode() { + local cnt + local initCnt + local expectCnt + + initCnt=`noNodesUp` + expectCnt=$[initCnt+1] + while true + do + cnt=`noNodesUp` + if [ $cnt -eq $expectCnt ]; then + break + elif [ $cnt -lt $initCnt ]; then + error_exit "Node count decreased while waiting, $initCnt -> $cnt" + elif [ $cnt -gt $expectCnt ]; then + error_exit "Node count exceeded expect count, $cnt > $expectCnt" + fi + sleep 3 + echo -n "." + done + echo -e "\n" +} + +bootorder_dvdhd() { + virsh dumpxml $1 | grep -v "" | \ + sed "/<\/os>/i\ + \n\ + \n\ + " > $tmpdir/vm.xml || error_exit "Could not set bootorder" + virsh define $tmpdir/vm.xml || error_exit "Could not set bootorder" +} + +bootorder_hddvd() { + virsh dumpxml $1 | grep -v "" | \ + sed "/<\/os>/i\ + \n\ + \n\ + " > $tmpdir/vm.xml || error_exit "Could not set bootorder" + virsh define $tmpdir/vm.xml || error_exit "Could not set bootorder" +} + +addisofile() { + virsh dumpxml $1 | grep -v '\.iso' | sed "s/<.*device='cdrom'.*//" | \ + sed "/<.*device='cdrom'.*/a " > $tmpdir/vm.xml \ + || error_exit "Could not add isofile" + virsh define $tmpdir/vm.xml || error_exit "Could not add isofile" +} + +removeisofile() { + virsh dumpxml $1 | grep -v '\.iso' | sed "s/<.*device='cdrom'.*//" \ + > $tmpdir/vm.xml \ + || error_exit "Could not remove isofile" + virsh define $tmpdir/vm.xml || error_exit "Could not remove isofile" +} diff --git a/fuel/deploy/functions/install_iso.sh b/fuel/deploy/functions/install_iso.sh new file mode 100755 index 0000000..0a92cd5 --- /dev/null +++ b/fuel/deploy/functions/install_iso.sh @@ -0,0 +1,62 @@ +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# stefan.k.berg@ericsson.com +# jonas.bjurel@ericsson.com +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# Recreate disk - needed for the reboot to work +fueldisk=`virsh dumpxml fuel-master | \ + grep fuel-master.raw | sed "s/.*'\(.*\)'.*/\1/"` +disksize=`ls -l $fueldisk | awk '{ print $5 }'` +rm -f $fueldisk +fallocate -l $disksize $fueldisk + +bootorder_hddvd fuel-master +sleep 3 +addisofile fuel-master $isofile +sleep 3 +virsh start fuel-master + +# wait for node up +echo "Waiting for Fuel master to accept SSH" +while true +do + ssh root@10.20.0.2 date 2>/dev/null + if [ $? -eq 0 ]; then + break + fi + sleep 10 +done + +# Wait until fuelmenu is up +echo "Waiting for fuelmenu to come up" +menuPid="" +while [ -z "$menuPid" ] +do + menuPid=`ssh root@10.20.0.2 "ps -ef" 2>&1 | grep fuelmenu | grep -v grep | awk '{ print $2 }'` + sleep 10 +done + +# This is where we would inject our own astute.yaml + +echo "Found menu as PID $menuPid, now killing it" +ssh root@10.20.0.2 "kill $menuPid" 2>/dev/null + +# Wait until installation complete +echo "Waiting for bootstrap of Fuel node to complete" +while true +do + ssh root@10.20.0.2 "ps -ef" 2>/dev/null \ + | grep -q /usr/local/sbin/bootstrap_admin_node + if [ $? -ne 0 ]; then + break + fi + sleep 10 +done + +echo "Waiting two minutes for Fuel to stabilize" +sleep 2m diff --git a/fuel/deploy/functions/isolinux.cfg.patch b/fuel/deploy/functions/isolinux.cfg.patch new file mode 100644 index 0000000..298a057 --- /dev/null +++ b/fuel/deploy/functions/isolinux.cfg.patch @@ -0,0 +1,14 @@ +*** isolinux/isolinux.cfg.orig 2015-04-15 08:29:52.026868322 -0400 +--- isolinux/isolinux.cfg 2015-04-15 08:30:34.350868343 -0400 +*************** +*** 19,22 **** + menu label Fuel Install (^Static IP) + menu default + kernel vmlinuz +! 
append initrd=initrd.img biosdevname=0 ks=cdrom:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld showmenu=no +--- 19,22 ---- + menu label Fuel Install (^Static IP) + menu default + kernel vmlinuz +! append initrd=initrd.img biosdevname=0 ks=cdrom:/ks.cfg ip=10.20.0.2 gw=10.20.0.1 dns1=10.20.0.1 netmask=255.255.255.0 hostname=fuel.domain.tld showmenu=yes + diff --git a/fuel/deploy/functions/ks.cfg.patch b/fuel/deploy/functions/ks.cfg.patch new file mode 100644 index 0000000..1896957 --- /dev/null +++ b/fuel/deploy/functions/ks.cfg.patch @@ -0,0 +1,19 @@ +*** ks.cfg.orig Wed Apr 15 21:47:09 2015 +--- ks.cfg Wed Apr 15 21:47:24 2015 +*************** +*** 35,41 **** + default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'` + + installdrive="undefined" +! forceformat="no" + for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done + + set ${drives} ${removable_drives} +--- 35,41 ---- + default_drive=`echo ${drives} ${removable_drives} | awk '{print $1}'` + + installdrive="undefined" +! forceformat="yes" + for I in `cat /proc/cmdline`; do case "$I" in *=*) eval $I;; esac ; done + + set ${drives} ${removable_drives} diff --git a/fuel/deploy/functions/patch-iso.sh b/fuel/deploy/functions/patch-iso.sh new file mode 100755 index 0000000..782737e --- /dev/null +++ b/fuel/deploy/functions/patch-iso.sh @@ -0,0 +1,69 @@ +#!/bin/bash +############################################################################## +# Copyright (c) 2015 Ericsson AB and others. +# stefan.k.berg@ericsson.com +# jonas.bjurel@ericsson.com +# All rights reserved. 
This program and the accompanying materials +# are made available under the terms of the Apache License, Version 2.0 +# which accompanies this distribution, and is available at +# http://www.apache.org/licenses/LICENSE-2.0 +############################################################################## + +# This is a temporary script - this should be rolled into a separate +# build target "make ci-iso" instead! + +exit_handler() { + rm -Rf $tmpnewdir + fusermount -u $tmporigdir 2>/dev/null + test -d $tmporigdir && mdir $tmporigdir +} + +trap exit_handler exit + +error_exit() { + echo "$@" + exit 1 +} + + +top=$(cd `dirname $0`; pwd) +origiso=$(cd `dirname $1`; echo `pwd`/`basename $1`) +newiso=$(cd `dirname $2`; echo `pwd`/`basename $2`) +tmpdir=$3 +tmporigdir=/${tmpdir}/origiso +tmpnewdir=/${tmpdir}/newiso + +test -f $origiso || error_exit "Could not find origiso $origiso" +test -d $tmpdir || error_exit "Could not find tmpdir $tmpdir" + + +if [ "`whoami`" != "root" ]; then + error_exit "You need be root to run this script" +fi + +echo "Copying..." +rm -Rf $tmporigdir $tmpnewdir +mkdir -p $tmporigdir $tmpnewdir +fuseiso $origiso $tmporigdir || error_exit "Failed fuseiso" +cd $tmporigdir +find . | cpio -pd $tmpnewdir +cd $tmpnewdir +fusermount -u $tmporigdir +rmdir $tmporigdir +chmod -R 755 $tmpnewdir + +echo "Patching..." +cd $tmpnewdir +# Patch ISO to make it suitable for automatic deployment +cat $top/ks.cfg.patch | patch -p0 || error_exit "Failed patch 1" +cat $top/isolinux.cfg.patch | patch -p0 || error_exit "Failed patch 2" +rm -rf .rr_moved + +echo "Creating iso $newiso" +mkisofs -quiet -r \ + -J -R -b isolinux/isolinux.bin \ + -no-emul-boot \ + -boot-load-size 4 -boot-info-table \ + --hide-rr-moved \ + -x "lost+found" -o $newiso . 
|| error_exit "Failed making iso" + diff --git a/fuel/deploy/libvirt/networks/fuel1 b/fuel/deploy/libvirt/networks/fuel1 new file mode 100644 index 0000000..7b2b154 --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel1 @@ -0,0 +1,12 @@ + + fuel1 + + + + + + + + + + diff --git a/fuel/deploy/libvirt/networks/fuel2 b/fuel/deploy/libvirt/networks/fuel2 new file mode 100644 index 0000000..615c920 --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel2 @@ -0,0 +1,5 @@ + + fuel2 + + + diff --git a/fuel/deploy/libvirt/networks/fuel3 b/fuel/deploy/libvirt/networks/fuel3 new file mode 100644 index 0000000..2383e6c --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel3 @@ -0,0 +1,5 @@ + + fuel3 + + + diff --git a/fuel/deploy/libvirt/networks/fuel4 b/fuel/deploy/libvirt/networks/fuel4 new file mode 100644 index 0000000..5b69f91 --- /dev/null +++ b/fuel/deploy/libvirt/networks/fuel4 @@ -0,0 +1,12 @@ + + fuel4 + + + + + + + + + + diff --git a/fuel/deploy/libvirt/vms/fuel-master b/fuel/deploy/libvirt/vms/fuel-master new file mode 100644 index 0000000..1b2d86f --- /dev/null +++ b/fuel/deploy/libvirt/vms/fuel-master @@ -0,0 +1,95 @@ + + fuel-master + 2097152 + 2097152 + 2 + + hvm + + + + + + + + + + + SandyBridge + Intel + + + + + + + + + + + + + + + + + + + + + + + + + + destroy + restart + restart + + /usr/bin/kvm + + + + +
+ + + + + +
+ + +
+ + +
+ + + + + +
+ + + + + + + + + + + +
+ +