# http://www.apache.org/licenses/LICENSE-2.0
###############################################################################
-import common
from configure_settings import ConfigureSettings
from configure_network import ConfigureNetwork
from configure_nodes import ConfigureNodes
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-log = common.log
-delete = common.delete
-create_dir_if_not_exists = common.create_dir_if_not_exists
+from common import (
+ E,
+ exec_cmd,
+ parse,
+ err,
+ log,
+ delete,
+ create_dir_if_not_exists,
+)
class ConfigureEnvironment(object):
###############################################################################
-import common
import yaml
import io
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-check_file_exists = common.check_file_exists
-log = common.log
-backup = common.backup
+from common import (
+ exec_cmd,
+ check_file_exists,
+ log,
+ backup,
+)
class ConfigureNetwork(object):
###############################################################################
-import common
import yaml
import io
import glob
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-check_file_exists = common.check_file_exists
-log = common.log
-backup = common.backup
+from common import (
+ exec_cmd,
+ check_file_exists,
+ log,
+ backup,
+)
class ConfigureNodes(object):
# http://www.apache.org/licenses/LICENSE-2.0
###############################################################################
-import common
+
import yaml
import io
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-check_file_exists = common.check_file_exists
-log = common.log
-backup = common.backup
+from common import (
+ exec_cmd,
+ check_file_exists,
+ log,
+ backup,
+)
class ConfigureSettings(object):
###############################################################################
-import os
import yaml
import io
-import glob
-import common
from dea import DeploymentEnvironmentAdapter
from configure_environment import ConfigureEnvironment
from deployment import Deployment
-YAML_CONF_DIR = '/var/lib/opnfv'
+from common import (
+ R,
+ exec_cmd,
+ parse,
+ check_file_exists,
+ commafy,
+ ArgParser,
+)
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-check_file_exists = common.check_file_exists
-log = common.log
-commafy = common.commafy
-ArgParser = common.ArgParser
+YAML_CONF_DIR = '/var/lib/opnfv'
class Deploy(object):
- def __init__(self, dea_file, blade_node_file, no_health_check):
+ def __init__(self, dea_file, no_health_check):
self.dea = DeploymentEnvironmentAdapter(dea_file)
- self.blade_node_file = blade_node_file
self.no_health_check = no_health_check
self.macs_per_blade = {}
self.blades = self.dea.get_node_ids()
- self.blade_node_dict = {}
+ self.blade_node_dict = self.dea.get_blade_node_map()
self.node_roles_dict = {}
self.env_id = None
self.wanted_release = self.dea.get_property('wanted_release')
- def get_blade_node_mapping(self):
- with io.open(self.blade_node_file, 'r') as stream:
- self.blade_node_dict = yaml.load(stream)
-
def assign_roles_to_cluster_node_ids(self):
self.node_roles_dict = {}
for blade, node in self.blade_node_dict.iteritems():
def deploy(self):
- self.get_blade_node_mapping()
-
self.assign_roles_to_cluster_node_ids()
self.configure_environment()
help='Don\'t run health check after deployment')
parser.add_argument('dea_file', action='store',
help='Deployment Environment Adapter: dea.yaml')
- parser.add_argument('blade_node_file', action='store',
- help='Blade Node mapping: blade_node.yaml')
args = parser.parse_args()
check_file_exists(args.dea_file)
- check_file_exists(args.blade_node_file)
- return (args.dea_file, args.blade_node_file, args.no_health_check)
+
+ kwargs = {'dea_file': args.dea_file,
+ 'no_health_check': args.no_health_check}
+ return kwargs
def main():
- dea_file, blade_node_file, no_health_check = parse_arguments()
- deploy = Deploy(dea_file, blade_node_file, no_health_check)
+ kwargs = parse_arguments()
+ deploy = Deploy(**kwargs)
deploy.deploy()
if __name__ == '__main__':
###############################################################################
-import common
-import os
-import shutil
-import glob
-import yaml
-import io
import time
+import re
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
-exec_cmd = common.exec_cmd
-run_proc = common.run_proc
-parse = common.parse
-err = common.err
-log = common.log
+from common import (
+ N,
+ E,
+ exec_cmd,
+ run_proc,
+ parse,
+ err,
+ log,
+ delete,
+)
+SEARCH_TEXT = 'Puppet (err)'
+LOG_FILE = '/var/log/puppet.log'
+GREP_LINES_OF_LEADING_CONTEXT = 100
+GREP_LINES_OF_TRAILING_CONTEXT = 100
class Deployment(object):
self.env_id = env_id
self.node_id_roles_dict = node_id_roles_dict
self.no_health_check = no_health_check
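+ # Timestamp ('YYYY-MM-DD HH:MM:SS') assumed to start each puppet log
+ # entry; collect_error_logs() uses it to find entry boundaries.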
+ self.pattern = re.compile(
+ r'\d\d\d\d-\d\d-\d\d\s\d\d:\d\d:\d\d')
+
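+ # Scan each node's puppet log over ssh for SEARCH_TEXT hits and rebuild
+ # the surrounding log entry for every hit by expanding the grep context
+ # up to the nearest timestamped lines before and after the match.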
+ def collect_error_logs(self):
+ for node_id, roles_blade in self.node_id_roles_dict.iteritems():
+ log_list = []
+ cmd = ('ssh -q node-%s grep \'"%s"\' %s'
+ % (node_id, SEARCH_TEXT, LOG_FILE))
+ results, _ = exec_cmd(cmd, False)
+ for result in results.splitlines():
+ log_msg = ''
+ cmd = ('ssh -q node-%s grep -B%s \'"%s"\' %s'
+ % (node_id, GREP_LINES_OF_LEADING_CONTEXT, result,
+ LOG_FILE))
+ details, _ = exec_cmd(cmd, False)
+ details_list = details.splitlines()
+
+ found_prev_log = False
+ for i in range(len(details_list) - 2, -1, -1):
+ if self.pattern.match(details_list[i]):
+ found_prev_log = True
+ break
+ if found_prev_log:
+ log_msg += '\n'.join(details_list[i:-1]) + '\n'
+
+ cmd = ('ssh -q node-%s grep -A%s \'"%s"\' %s'
+ % (node_id, GREP_LINES_OF_TRAILING_CONTEXT, result,
+ LOG_FILE))
+ details, _ = exec_cmd(cmd, False)
+ details_list = details.splitlines()
+
+ found_next_log = False
+ for i in range(1, len(details_list)):
+ if self.pattern.match(details_list[i]):
+ found_next_log = True
+ break
+ if found_next_log:
+ log_msg += '\n'.join(details_list[:i])
+ else:
+ log_msg += details
+
+ if log_msg:
+ log_list.append(log_msg)
+
+ if log_list:
+ role = ('controller' if 'controller' in roles_blade[0]
+ else 'compute host')
+ log('_' * 40 + 'Errors in node-%s %s' % (node_id, role)
+ + '_' * 40)
+ for log_msg in log_list:
+ print(log_msg + '\n')
def run_deploy(self):
WAIT_LOOP = 180
break
else:
time.sleep(SLEEP_TIME)
- exec_cmd('rm %s' % LOG_FILE)
+ delete(LOG_FILE)
if ready:
log('Environment %s successfully deployed' % self.env_id)
else:
+ self.collect_error_logs()
err('Deployment failed, environment %s is not operational'
% self.env_id)
# http://www.apache.org/licenses/LICENSE-2.0
###############################################################################
+
import subprocess
import sys
import os
return_code = process.returncode
if check:
if return_code > 0:
- err(response)
+ raise Exception(response)
else:
return response
return response, return_code
settings = self.get_property('settings')
ntp_list = settings['editable']['external_ntp']['ntp_list']['value']
return [n.strip() for n in ntp_list.split(',')]
+
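+ # Accessor for the blade-to-node mapping that deploy_env.py stores in
+ # the DEA before uploading it to the Fuel master.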
+ def get_blade_node_map(self):
+ return self.dea_struct['blade_node_map']
from install_fuel_master import InstallFuelMaster
from deploy_env import CloudDeploy
from execution_environment import ExecutionEnvironment
-import common
-
-log = common.log
-exec_cmd = common.exec_cmd
-err = common.err
-warn = common.warn
-check_file_exists = common.check_file_exists
-check_dir_exists = common.check_dir_exists
-create_dir_if_not_exists = common.create_dir_if_not_exists
-delete = common.delete
-check_if_root = common.check_if_root
-ArgParser = common.ArgParser
+
+from common import (
+ log,
+ exec_cmd,
+ err,
+ warn,
+ check_file_exists,
+ create_dir_if_not_exists,
+ delete,
+ check_if_root,
+ ArgParser,
+)
FUEL_VM = 'fuel'
PATCH_DIR = 'fuel_patch'
def __init__(self, no_fuel, fuel_only, no_health_check, cleanup_only,
cleanup, storage_dir, pxe_bridge, iso_file, dea_file,
- dha_file, fuel_plugins_dir):
+ dha_file, fuel_plugins_dir, fuel_plugins_conf_dir,
+ no_plugins):
self.no_fuel = no_fuel
self.fuel_only = fuel_only
self.no_health_check = no_health_check
self.dea_file = dea_file
self.dha_file = dha_file
self.fuel_plugins_dir = fuel_plugins_dir
+ self.fuel_plugins_conf_dir = fuel_plugins_conf_dir
+ self.no_plugins = no_plugins
self.dea = (DeploymentEnvironmentAdapter(dea_file)
if not cleanup_only else None)
self.dha = DeploymentHardwareAdapter(dha_file)
def install_fuel_master(self):
log('Install Fuel Master')
- new_iso = '%s/deploy-%s' \
- % (self.tmp_dir, os.path.basename(self.iso_file))
+ new_iso = ('%s/deploy-%s'
+ % (self.tmp_dir, os.path.basename(self.iso_file)))
self.patch_iso(new_iso)
self.iso_file = new_iso
self.install_iso()
self.fuel_conf['ip'], self.fuel_username,
self.fuel_password, self.fuel_node_id,
self.iso_file, WORK_DIR,
- self.fuel_plugins_dir)
+ self.fuel_plugins_dir, self.no_plugins)
fuel.install()
def patch_iso(self, new_iso):
tmp_orig_dir = '%s/origiso' % self.tmp_dir
tmp_new_dir = '%s/newiso' % self.tmp_dir
- self.copy(tmp_orig_dir, tmp_new_dir)
- self.patch(tmp_new_dir, new_iso)
+ try:
+ self.copy(tmp_orig_dir, tmp_new_dir)
+ self.patch(tmp_new_dir, new_iso)
+ except Exception as e:
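+ # Best-effort cleanup on failure: release the fuseiso mount and
+ # remove the temporary work dir before reporting the error.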
+ exec_cmd('fusermount -u %s' % tmp_orig_dir, False)
+ delete(self.tmp_dir)
+ err(e)
def copy(self, tmp_orig_dir, tmp_new_dir):
log('Copying...')
exec_cmd('fuseiso %s %s' % (self.iso_file, tmp_orig_dir))
with cd(tmp_orig_dir):
exec_cmd('find . | cpio -pd %s' % tmp_new_dir)
- with cd(tmp_new_dir):
- exec_cmd('fusermount -u %s' % tmp_orig_dir)
+ exec_cmd('fusermount -u %s' % tmp_orig_dir)
delete(tmp_orig_dir)
exec_cmd('chmod -R 755 %s' % tmp_new_dir)
def deploy_env(self):
dep = CloudDeploy(self.dea, self.dha, self.fuel_conf['ip'],
self.fuel_username, self.fuel_password,
- self.dea_file, WORK_DIR, self.no_health_check)
+ self.dea_file, self.fuel_plugins_conf_dir,
+ WORK_DIR, self.no_health_check)
return dep.deploy()
def setup_execution_environment(self):
'[default: pxebr]')
parser.add_argument('-p', dest='fuel_plugins_dir', action='store',
help='Fuel Plugins directory')
+ parser.add_argument('-pc', dest='fuel_plugins_conf_dir', action='store',
+ help='Fuel Plugins Configuration directory')
+ parser.add_argument('-np', dest='no_plugins', action='store_true',
+ default=False, help='Do not install Fuel Plugins')
args = parser.parse_args()
log(args)
'storage_dir': args.storage_dir, 'pxe_bridge': args.pxe_bridge,
'iso_file': args.iso_file, 'dea_file': args.dea_file,
'dha_file': args.dha_file,
- 'fuel_plugins_dir': args.fuel_plugins_dir}
+ 'fuel_plugins_dir': args.fuel_plugins_dir,
+ 'fuel_plugins_conf_dir': args.fuel_plugins_conf_dir,
+ 'no_plugins': args.no_plugins}
return kwargs
def main():
kwargs = parse_arguments()
-
d = AutoDeploy(**kwargs)
sys.exit(d.run())
import yaml
import glob
import time
+import shutil
from ssh_client import SSHClient
-import common
-
-exec_cmd = common.exec_cmd
-err = common.err
-check_file_exists = common.check_file_exists
-log = common.log
-parse = common.parse
-commafy = common.commafy
-N = common.N
-E = common.E
-R = common.R
-RO = common.RO
+
+from common import (
+ err,
+ log,
+ parse,
+ N,
+ E,
+ R,
+ delete,
+)
CLOUD_DEPLOY_FILE = 'deploy.py'
BLADE_RESTART_TIMES = 3
class CloudDeploy(object):
def __init__(self, dea, dha, fuel_ip, fuel_username, fuel_password,
- dea_file, work_dir, no_health_check):
+ dea_file, fuel_plugins_conf_dir, work_dir, no_health_check):
self.dea = dea
self.dha = dha
self.fuel_ip = fuel_ip
self.fuel_username = fuel_username
self.fuel_password = fuel_password
self.dea_file = dea_file
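+ # Work on a hidden copy of the DEA file so the user's original stays
+ # untouched when plugin settings and the blade/node map are merged in.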
+ self.updated_dea_file = (
+ '%s/.%s' % (os.path.dirname(self.dea_file),
+ os.path.basename(self.dea_file)))
+ shutil.copy2(self.dea_file, self.updated_dea_file)
+ self.fuel_plugins_conf_dir = fuel_plugins_conf_dir
self.work_dir = work_dir
self.no_health_check = no_health_check
self.file_dir = os.path.dirname(os.path.realpath(__file__))
self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
self.fuel_password)
- self.blade_node_file = '%s/blade_node.yaml' % self.file_dir
self.node_ids = self.dha.get_node_ids()
self.wanted_release = self.dea.get_property('wanted_release')
self.blade_node_dict = {}
self.macs_per_blade = {}
+ def merge_plugin_config_files_to_dea_file(self):
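+ # Merge every plugin YAML found in the configuration directory (the
+ # -pc argument, or plugins_conf/ next to the DEA file by default)
+ # into the editable settings of the DEA working copy.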
+ plugins_conf_dir = (
+ self.fuel_plugins_conf_dir if self.fuel_plugins_conf_dir
+ else '%s/plugins_conf' % os.path.dirname(self.dea_file))
+ if os.path.isdir(plugins_conf_dir):
+ with io.open(self.updated_dea_file) as stream:
+ updated_dea = yaml.load(stream)
+ for plugin_file in glob.glob('%s/*.yaml' % plugins_conf_dir):
+ with io.open(plugin_file) as stream:
+ plugin_conf = yaml.load(stream)
+ updated_dea['settings']['editable'].update(plugin_conf)
+ with io.open(self.updated_dea_file, 'w') as stream:
+ yaml.dump(updated_dea, stream, default_flow_style=False)
+
def upload_cloud_deployment_files(self):
with self.ssh as s:
s.exec_cmd('rm -rf %s' % self.work_dir, False)
s.exec_cmd('mkdir %s' % self.work_dir)
- s.scp_put(self.dea_file, self.work_dir)
- s.scp_put(self.blade_node_file, self.work_dir)
+ s.scp_put(self.updated_dea_file, '%s/%s' % (
+ self.work_dir, os.path.basename(self.dea_file)))
s.scp_put('%s/common.py' % self.file_dir, self.work_dir)
s.scp_put('%s/dea.py' % self.file_dir, self.work_dir)
for f in glob.glob('%s/cloud/*' % self.file_dir):
log('START CLOUD DEPLOYMENT')
deploy_app = '%s/%s' % (self.work_dir, deploy_app)
dea_file = '%s/%s' % (self.work_dir, os.path.basename(self.dea_file))
- blade_node_file = '%s/%s' % (
- self.work_dir, os.path.basename(self.blade_node_file))
with self.ssh as s:
- status = s.run(
- 'python %s %s %s %s' % (
- deploy_app, ('-nh' if self.no_health_check else ''),
- dea_file, blade_node_file))
+ status = s.run('python %s %s %s' % (
+ deploy_app, ('-nh' if self.no_health_check else ''), dea_file))
return status
def check_supported_release(self):
err('Not all blades have been discovered: %s'
% self.not_discovered_blades_summary())
- with io.open(self.blade_node_file, 'w') as stream:
- yaml.dump(self.blade_node_dict, stream, default_flow_style=False)
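+ # Store the blade-to-node mapping inside the DEA working copy instead
+ # of a separate blade_node.yaml; the cloud-side deploy.py reads it via
+ # get_blade_node_map().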
+ with io.open(self.updated_dea_file) as stream:
+ updated_dea = yaml.load(stream)
+ updated_dea.update({'blade_node_map': self.blade_node_dict})
+ with io.open(self.updated_dea_file, 'w') as stream:
+ yaml.dump(updated_dea, stream, default_flow_style=False)
def discovery_waiting_loop(self, discovered_macs):
WAIT_LOOP = 360
if blade:
log('Blade %s discovered as Node %s with MAC %s'
% (blade, node[N['id']], node[N['mac']]))
- self.blade_node_dict[blade] = node[N['id']]
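+ # Cast the node id to int so it is dumped into the DEA YAML as a number.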
+ self.blade_node_dict[blade] = int(node[N['id']])
def find_mac_in_dict(self, mac):
for blade, mac_list in self.macs_per_blade.iteritems():
self.wait_for_discovered_blades()
+ self.merge_plugin_config_files_to_dea_file()
+
self.upload_cloud_deployment_files()
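+ # The merged DEA copy has been uploaded; remove the local temporary file.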
+ delete(self.updated_dea_file)
+
return self.run_cloud_deploy(CLOUD_DEPLOY_FILE)
###############################################################################
-import common
from hardware_adapter import HardwareAdapter
-log = common.log
-exec_cmd = common.exec_cmd
-err = common.err
+from common import (
+ log,
+ exec_cmd,
+ err,
+)
+
'''
This is a hardware adapter for Intel AMT based systems. It uses amttool to interact
# http://www.apache.org/licenses/LICENSE-2.0
###############################################################################
+
import yaml
import io
###############################################################################
-import common
from ipmi_adapter import IpmiAdapter
from ssh_client import SSHClient
-log = common.log
+from common import (
+ log,
+)
DEV = {'pxe': 'bootsource5',
'disk': 'bootsource3',
###############################################################################
-import common
import time
from hardware_adapter import HardwareAdapter
-log = common.log
-exec_cmd = common.exec_cmd
-err = common.err
+from common import (
+ log,
+ exec_cmd,
+ err,
+)
class IpmiAdapter(HardwareAdapter):
###############################################################################
-import common
from lxml import etree
from hardware_adapter import HardwareAdapter
-log = common.log
-exec_cmd = common.exec_cmd
-err = common.err
+from common import (
+ log,
+ exec_cmd,
+ err,
+ delete,
+)
DEV = {'pxe': 'network',
'disk': 'hd',
with open(xml_file, 'w') as f:
tree.write(f, pretty_print=True, xml_declaration=True)
exec_cmd('virsh define %s' % xml_file)
- exec_cmd('rm -fr %s' % temp_dir)
+ delete(temp_dir)
def node_zero_mbr(self, node_id):
vm_name = self.get_node_property(node_id, 'libvirtName')
for source in sources:
disk_file = source.get('file')
disk_size = exec_cmd('ls -l %s' % disk_file).split()[4]
- exec_cmd('rm -f %s' % disk_file)
+ delete(disk_file)
exec_cmd('fallocate -l %s %s' % (disk_size, disk_file))
def node_eject_iso(self, node_id):
from lxml import etree
-
-import common
from dha_adapters.libvirt_adapter import LibvirtAdapter
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
+from common import (
+ exec_cmd,
+ log,
+ delete,
+)
class ExecutionEnvironment(object):
exec_cmd('virsh destroy %s' % vm_name, False)
exec_cmd('virsh undefine %s' % vm_name, False)
for file in disk_files:
- exec_cmd('rm -f %s' % file)
+ delete(file)
def define_vm(self, vm_name, temp_vm_file, disk_path):
log('Creating VM %s with disks %s' % (vm_name, disk_path))
from lxml import etree
import glob
-
-import common
from execution_environment import ExecutionEnvironment
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
+from common import (
+ exec_cmd,
+ log,
+ check_dir_exists,
+ check_file_exists,
+ delete,
+)
class LibvirtEnvironment(ExecutionEnvironment):
temp_vm_file = '%s/%s' % (temp_dir, vm_name)
exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
self.define_vm(vm_name, temp_vm_file, disk_path)
- exec_cmd('rm -fr %s' % temp_dir)
+ delete(temp_dir)
def start_vms(self):
for node_id in self.node_ids:
for node_id in self.node_ids:
self.delete_vm(node_id)
+
def setup_environment(self):
check_dir_exists(self.network_dir)
self.cleanup_environment()
from lxml import etree
-
-import common
from execution_environment import ExecutionEnvironment
-exec_cmd = common.exec_cmd
-log = common.log
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
+from common import (
+ exec_cmd,
+ check_file_exists,
+ check_if_root,
+ delete,
+)
class VirtualFuel(ExecutionEnvironment):
exec_cmd('cp %s %s' % (vm_template, temp_vm_file))
self.set_vm_nic(temp_vm_file)
self.define_vm(vm_name, temp_vm_file, disk_path)
- exec_cmd('rm -fr %s' % temp_dir)
+ delete(temp_dir)
def setup_environment(self):
check_if_root()
import io
import os
-import common
from environments.libvirt_environment import LibvirtEnvironment
from environments.virtual_fuel import VirtualFuel
-exec_cmd = common.exec_cmd
-err = common.err
-log = common.log
-check_dir_exists = common.check_dir_exists
-check_file_exists = common.check_file_exists
-check_if_root = common.check_if_root
-ArgParser = common.ArgParser
-
class ExecutionEnvironment(object):
# http://www.apache.org/licenses/LICENSE-2.0
###############################################################################
-
-import common
import time
import os
import glob
from ssh_client import SSHClient
from dha_adapters.libvirt_adapter import LibvirtAdapter
-log = common.log
-err = common.err
-clean = common.clean
-delete = common.delete
+from common import (
+ log,
+ err,
+ clean,
+ delete,
+)
TRANSPLANT_FUEL_SETTINGS = 'transplant_fuel_settings.py'
BOOTSTRAP_ADMIN = '/usr/local/sbin/bootstrap_admin_node'
FUEL_CLIENT_CONFIG = '/etc/fuel/client/config.yaml'
PLUGINS_DIR = '~/plugins'
LOCAL_PLUGIN_FOLDER = '/opt/opnfv'
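+# Plugin installation output containing any of these substrings is treated
+# as non-fatal (package already installed, host name resolution error).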
+IGNORABLE_FUEL_ERRORS = ['does not update installed package',
+ 'Couldn\'t resolve host']
class InstallFuelMaster(object):
def __init__(self, dea_file, dha_file, fuel_ip, fuel_username,
fuel_password, fuel_node_id, iso_file, work_dir,
- fuel_plugins_dir):
+ fuel_plugins_dir, no_plugins):
self.dea_file = dea_file
self.dha = LibvirtAdapter(dha_file)
self.fuel_ip = fuel_ip
self.iso_dir = os.path.dirname(self.iso_file)
self.work_dir = work_dir
self.fuel_plugins_dir = fuel_plugins_dir
+ self.no_plugins = no_plugins
self.file_dir = os.path.dirname(os.path.realpath(__file__))
self.ssh = SSHClient(self.fuel_ip, self.fuel_username,
self.fuel_password)
log('Waiting for one minute for Fuel to stabilize')
time.sleep(60)
- self.delete_deprecated_fuel_client_config_from_fuel_6_1()
+ self.delete_deprecated_fuel_client_config()
+
+ if not self.no_plugins:
- self.collect_plugin_files()
+ self.collect_plugin_files()
- self.install_plugins()
+ self.install_plugins()
self.post_install_cleanup()
if self.fuel_plugins_dir:
for f in glob.glob('%s/*.rpm' % self.fuel_plugins_dir):
s.scp_put(f, PLUGINS_DIR)
- else:
- s.exec_cmd('cp %s/*.rpm %s' % (LOCAL_PLUGIN_FOLDER,
- PLUGINS_DIR))
def install_plugins(self):
log('Installing Fuel Plugins')
+ plugin_files = []
with self.ssh as s:
- r = s.exec_cmd('find %s -type f -name \'*.rpm\'' % PLUGINS_DIR)
- for f in r.splitlines():
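+ # Look for plugin RPMs in both the uploaded PLUGINS_DIR and the
+ # pre-existing LOCAL_PLUGIN_FOLDER on the Fuel master.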
+ for plugin_location in [PLUGINS_DIR, LOCAL_PLUGIN_FOLDER]:
+ r = s.exec_cmd('find %s -type f -name \'*.rpm\''
+ % plugin_location)
+ plugin_files.extend(r.splitlines())
+ for f in plugin_files:
log('Found plugin %s, installing ...' % f)
r, e = s.exec_cmd('fuel plugins --install %s' % f, False)
- if e and 'does not update installed package' not in r:
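+ # Combine stdout and stderr, and abort only when the output matches
+ # none of the ignorable error patterns.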
+ printout = r + e if e else r
+ if e and all([ignorable not in printout
+ for ignorable in IGNORABLE_FUEL_ERRORS]):
raise Exception('Installation of Fuel Plugin %s '
'failed: %s' % (f, e))
self.ssh.open()
success = True
break
- except Exception as e:
+ except Exception:
log('Trying to SSH into Fuel VM %s ... sleeping %s seconds'
% (self.fuel_ip, SLEEP_TIME))
time.sleep(SLEEP_TIME)
log('Remove ISO directory %s' % self.iso_dir)
delete(self.iso_dir)
- def delete_deprecated_fuel_client_config_from_fuel_6_1(self):
+ def delete_deprecated_fuel_client_config(self):
with self.ssh as s:
response, error = s.exec_cmd('fuel -v', False)
if (error and
- 'DEPRECATION WARNING' in error and
- '6.1.0' in error and
- FUEL_CLIENT_CONFIG in error):
+ 'DEPRECATION WARNING' in error and FUEL_CLIENT_CONFIG in error):
log('Delete deprecated fuel client config %s' % FUEL_CLIENT_CONFIG)
with self.ssh as s:
s.exec_cmd('rm %s' % FUEL_CLIENT_CONFIG, False)
###############################################################################
-import common
import time
import os
import yaml
import glob
import shutil
-N = common.N
-E = common.E
-R = common.R
-ArgParser = common.ArgParser
-exec_cmd = common.exec_cmd
-parse = common.parse
-err = common.err
-log = common.log
-delete = common.delete
-commafy = common.commafy
+from common import (
+ N,
+ E,
+ R,
+ ArgParser,
+ exec_cmd,
+ parse,
+ err,
+ log,
+ delete,
+ commafy,
+)
DEA_1 = '''
title: Deployment Environment Adapter (DEA)
import paramiko
-import common
import scp
-TIMEOUT = 600
-log = common.log
-err = common.err
+from common import (
+ log,
+ err,
+)
+TIMEOUT = 600
class SSHClient(object):
if check:
if error:
self.close()
- err(error)
+ raise Exception(error)
else:
return response
return response, error
type: text
value: '16'
weight: 70
- opendaylight:
- metadata:
- enabled: true
- label: OpenDaylight plugin
- plugin_id: 1
- restrictions:
- - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
- toggleable: true
- weight: 70
- rest_api_port:
- description: Port on which ODL REST API will be available.
- label: Port number
- regex:
- error: Invalid port number
- source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
- type: text
- value: '8282'
- weight: 40
- use_vxlan:
- description: Configure neutron to use VXLAN tunneling
- label: Use vxlan
- restrictions:
- - action: disable
- condition: networking_parameters:segmentation_type == 'vlan'
- message: Neutron with GRE segmentation required
- type: checkbox
- value: true
- weight: 20
- vni_range_end:
- description: VXLAN VNI IDs range end
- label: VNI range end
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10000'
- weight: 31
- vni_range_start:
- description: VXLAN VNI IDs range start
- label: VNI range start
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10'
- weight: 30
provision:
metadata:
label: Provision
--- /dev/null
+opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
\ No newline at end of file
type: text
value: '16'
weight: 70
- opendaylight:
- metadata:
- enabled: true
- label: OpenDaylight plugin
- plugin_id: 1
- restrictions:
- - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
- toggleable: true
- weight: 70
- rest_api_port:
- description: Port on which ODL REST API will be available.
- label: Port number
- regex:
- error: Invalid port number
- source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
- type: text
- value: '8282'
- weight: 40
- use_vxlan:
- description: Configure neutron to use VXLAN tunneling
- label: Use vxlan
- restrictions:
- - action: disable
- condition: networking_parameters:segmentation_type == 'vlan'
- message: Neutron with GRE segmentation required
- type: checkbox
- value: true
- weight: 20
- vni_range_end:
- description: VXLAN VNI IDs range end
- label: VNI range end
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10000'
- weight: 31
- vni_range_start:
- description: VXLAN VNI IDs range start
- label: VNI range start
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10'
- weight: 30
provision:
metadata:
label: Provision
--- /dev/null
+opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
\ No newline at end of file
type: text
value: '16'
weight: 70
- opendaylight:
- metadata:
- enabled: true
- label: OpenDaylight plugin
- plugin_id: 1
- restrictions:
- - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
- toggleable: true
- weight: 70
- rest_api_port:
- description: Port on which ODL REST API will be available.
- label: Port number
- regex:
- error: Invalid port number
- source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
- type: text
- value: '8282'
- weight: 40
- use_vxlan:
- description: Configure neutron to use VXLAN tunneling
- label: Use vxlan
- restrictions:
- - action: disable
- condition: networking_parameters:segmentation_type == 'vlan'
- message: Neutron with GRE segmentation required
- type: checkbox
- value: true
- weight: 20
- vni_range_end:
- description: VXLAN VNI IDs range end
- label: VNI range end
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10000'
- weight: 31
- vni_range_start:
- description: VXLAN VNI IDs range start
- label: VNI range start
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10'
- weight: 30
provision:
metadata:
label: Provision
--- /dev/null
+opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
type: text
value: '16'
weight: 70
- opendaylight:
- metadata:
- enabled: true
- label: OpenDaylight plugin
- plugin_id: 1
- restrictions:
- - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
- toggleable: true
- weight: 70
- rest_api_port:
- description: Port on which ODL REST API will be available.
- label: Port number
- regex:
- error: Invalid port number
- source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
- type: text
- value: '8282'
- weight: 40
- use_vxlan:
- description: Configure neutron to use VXLAN tunneling
- label: Use vxlan
- restrictions:
- - action: disable
- condition: networking_parameters:segmentation_type == 'vlan'
- message: Neutron with GRE segmentation required
- type: checkbox
- value: true
- weight: 20
- vni_range_end:
- description: VXLAN VNI IDs range end
- label: VNI range end
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10000'
- weight: 31
- vni_range_start:
- description: VXLAN VNI IDs range start
- label: VNI range start
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10'
- weight: 30
provision:
metadata:
label: Provision
--- /dev/null
+opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
type: text
value: '16'
weight: 70
- opendaylight:
- metadata:
- enabled: true
- label: OpenDaylight plugin
- plugin_id: 1
- restrictions:
- - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
- toggleable: true
- weight: 70
- rest_api_port:
- description: Port on which ODL REST API will be available.
- label: Port number
- regex:
- error: Invalid port number
- source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
- type: text
- value: '8282'
- weight: 40
- use_vxlan:
- description: Configure neutron to use VXLAN tunneling
- label: Use vxlan
- restrictions:
- - action: disable
- condition: networking_parameters:segmentation_type == 'vlan'
- message: Neutron with GRE segmentation required
- type: checkbox
- value: true
- weight: 20
- vni_range_end:
- description: VXLAN VNI IDs range end
- label: VNI range end
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10000'
- weight: 31
- vni_range_start:
- description: VXLAN VNI IDs range start
- label: VNI range start
- regex:
- error: Invalid ID number
- source: ^\d+$
- restrictions:
- - action: hide
- condition: networking_parameters:segmentation_type == 'vlan'
- type: text
- value: '10'
- weight: 30
provision:
metadata:
label: Provision
--- /dev/null
+opendaylight:
+ metadata:
+ enabled: true
+ label: OpenDaylight plugin
+ plugin_id: 1
+ restrictions:
+ - cluster:net_provider != 'neutron': Only neutron is supported by OpenDaylight
+ toggleable: true
+ weight: 70
+ rest_api_port:
+ description: Port on which ODL REST API will be available.
+ label: Port number
+ regex:
+ error: Invalid port number
+ source: ^([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$
+ type: text
+ value: '8282'
+ weight: 40
+ use_vxlan:
+ description: Configure neutron to use VXLAN tunneling
+ label: Use vxlan
+ restrictions:
+ - action: disable
+ condition: networking_parameters:segmentation_type == 'vlan'
+ message: Neutron with GRE segmentation required
+ type: checkbox
+ value: true
+ weight: 20
+ vni_range_end:
+ description: VXLAN VNI IDs range end
+ label: VNI range end
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10000'
+ weight: 31
+ vni_range_start:
+ description: VXLAN VNI IDs range start
+ label: VNI range start
+ regex:
+ error: Invalid ID number
+ source: ^\d+$
+ restrictions:
+ - action: hide
+ condition: networking_parameters:segmentation_type == 'vlan'
+ type: text
+ value: '10'
+ weight: 30
import sys
-import common
import io
import yaml
from dea import DeploymentEnvironmentAdapter
-check_file_exists = common.check_file_exists
+from common import (
+ check_file_exists,
+)
ASTUTE_YAML = '/etc/fuel/astute.yaml'