X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=tools%2Fhugepages.py;h=4c91e7d25a78618b5022245650d5d8796aac2f64;hb=2cfae5e4569bf595e238a4ccb56a6ef5544a3265;hp=3a434d6e4fa2cc113ebe2ea50725fde27e504ba2;hpb=dd47e4e2ea55f01235c20bed8e07d69fb51c292e;p=vswitchperf.git

diff --git a/tools/hugepages.py b/tools/hugepages.py
index 3a434d6e..4c91e7d2 100644
--- a/tools/hugepages.py
+++ b/tools/hugepages.py
@@ -1,4 +1,4 @@
-# Copyright 2015-2016 Intel Corporation.
+# Copyright 2015-2017 Intel Corporation.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,40 +20,133 @@ import re
 import subprocess
 import logging
 import locale
+import math
 
 from tools import tasks
 from conf import settings
 
 _LOGGER = logging.getLogger(__name__)
-
+_ALLOCATED_HUGEPAGES = False
 
 #
 # hugepage management
 #
 
+def get_hugepage_size():
+    """Return the size of the configured hugepages
+    """
+    hugepage_size_re = re.compile(r'^Hugepagesize:\s+(?P<size_hp>\d+)\s+kB',
+                                  re.IGNORECASE)
+    with open('/proc/meminfo', 'r') as result_file:
+        data = result_file.readlines()
+    for line in data:
+        match = hugepage_size_re.search(line)
+        if match:
+            _LOGGER.info('Hugepages size: %s kb', match.group('size_hp'))
+            return int(match.group('size_hp'))
+    _LOGGER.error('Could not parse for hugepage size')
+    return 0
+
+
+def allocate_hugepages():
+    """Allocate hugepages on the fly
+    """
+    hp_size = get_hugepage_size()
+    if hp_size > 0:
+        nr_hp = int(math.ceil(settings.getValue('HUGEPAGE_RAM_ALLOCATION')/hp_size))
+        _LOGGER.info('Will allocate %s hugepages.', nr_hp)
+
+        nr_hugepages = 'vm.nr_hugepages=' + str(nr_hp)
+        try:
+            tasks.run_task(['sudo', 'sysctl', nr_hugepages],
+                           _LOGGER, 'Trying to allocate hugepages..', True)
+        except subprocess.CalledProcessError:
+            _LOGGER.error('Unable to allocate hugepages.')
+            return False
+        # pylint: disable=global-statement
+        global _ALLOCATED_HUGEPAGES
+        _ALLOCATED_HUGEPAGES = True
+        return True
+
+    else:
+        _LOGGER.error('Division by 0 will be supported in next release')
+        return False
+
+def deallocate_hugepages():
+    """De-allocate hugepages that were allocated on the fly
+    """
+    # pylint: disable=global-statement
+    global _ALLOCATED_HUGEPAGES
+    if _ALLOCATED_HUGEPAGES:
+        nr_hugepages = 'vm.nr_hugepages= 0'
+        try:
+            tasks.run_task(['sudo', 'sysctl', nr_hugepages],
+                           _LOGGER, 'Trying to de-allocate hugepages..', True)
+        except subprocess.CalledProcessError:
+            _LOGGER.error('Unable to de-allocate hugepages.')
+            return False
+        _ALLOCATED_HUGEPAGES = False
+    return True
+
+
+def get_free_hugepages(socket=None):
+    """Get the free hugepage totals on the system.
+
+    :param socket: optional socket param to get free hugepages on a socket. To
+                   be passed a string.
+    :returns: hugepage amount as int
+    """
+    hugepage_free_re = re.compile(r'HugePages_Free:\s+(?P<free_hp>\d+)$')
+    if socket:
+        if os.path.exists(
+                '/sys/devices/system/node/node{}/meminfo'.format(socket)):
+            meminfo_path = '/sys/devices/system/node/node{}/meminfo'.format(
+                socket)
+        else:
+            _LOGGER.info('No hugepage info found for socket %s', socket)
+            return 0
+    else:
+        meminfo_path = '/proc/meminfo'
+
+    with open(meminfo_path, 'r') as result_file:
+        data = result_file.readlines()
+    for line in data:
+        match = hugepage_free_re.search(line)
+        if match:
+            _LOGGER.info('Hugepages free: %s %s', match.group('free_hp'),
+                         'on socket {}'.format(socket) if socket else '')
+            return int(match.group('free_hp'))
+    _LOGGER.info('Could not parse for hugepage size')
+    return 0
+
+
 def is_hugepage_available():
-    """Check if hugepages are available on the system.
+    """Check if hugepages are configured/available on the system.
     """
-    hugepage_re = re.compile(r'^HugePages_Free:\s+(?P<num_hp>\d+)$')
+    hugepage_size_re = re.compile(r'^Hugepagesize:\s+(?P<size_hp>\d+)\s+kB',
+                                  re.IGNORECASE)
 
     # read in meminfo
    with open('/proc/meminfo') as mem_file:
         mem_info = mem_file.readlines()
 
-    # first check if module is loaded
+    # see if the hugepage size is the recommended value
     for line in mem_info:
-        result = hugepage_re.match(line)
-        if not result:
-            continue
-
-        num_huge = result.group('num_hp')
-        if not num_huge:
-            _LOGGER.info('No free hugepages.')
-        else:
-            _LOGGER.info('Found \'%s\' free hugepage(s).', num_huge)
-            return True
-
-    return False
+        match_size = hugepage_size_re.match(line)
+        if match_size:
+            if match_size.group('size_hp') != '1048576':
+                _LOGGER.info(
+                    '%s%s%s kB',
+                    'Hugepages not configured for recommend 1GB size. ',
+                    'Currently set at ', match_size.group('size_hp'))
+    num_huge = get_free_hugepages()
+    if num_huge == 0:
+        _LOGGER.info('No free hugepages.')
+        if not allocate_hugepages():
+            return False
+    else:
+        _LOGGER.info('Found \'%s\' free hugepage(s).', num_huge)
+    return True
 
 
 def is_hugepage_mounted():
@@ -69,10 +162,11 @@ def is_hugepage_mounted():
 
 
 def mount_hugepages():
-    """Ensure hugepages are mounted.
+    """Ensure hugepages are mounted. Raises RuntimeError if no configured
+    hugepages are available.
     """
     if not is_hugepage_available():
-        return
+        raise RuntimeError('No Hugepages configured.')
 
     if is_hugepage_mounted():
         return
@@ -100,4 +194,5 @@ def umount_hugepages():
     except subprocess.CalledProcessError:
         _LOGGER.error('Unable to umount hugepages.')
 
-
+    if not deallocate_hugepages():
+        _LOGGER.error('Unable to deallocate previously allocated hugepages.')
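For readers of this change, a minimal standalone sketch of the allocation arithmetic that the new allocate_hugepages() performs is given below. It is not part of the patch: estimate_nr_hugepages is a hypothetical helper, and it assumes the requested RAM amount (mirroring the HUGEPAGE_RAM_ALLOCATION setting) is expressed in kB so it divides cleanly by the kB Hugepagesize value read from /proc/meminfo.

# Standalone sketch (not part of the patch above): estimate the value that
# allocate_hugepages() would pass to 'sysctl vm.nr_hugepages'.
# Assumes hugepage_ram_kb is given in kB, mirroring HUGEPAGE_RAM_ALLOCATION.
import math
import re


def estimate_nr_hugepages(hugepage_ram_kb):
    """Return ceil(requested kB / Hugepagesize kB), or 0 if the size is unknown."""
    size_re = re.compile(r'^Hugepagesize:\s+(?P<size_hp>\d+)\s+kB', re.IGNORECASE)
    with open('/proc/meminfo', 'r') as meminfo:
        for line in meminfo:
            match = size_re.match(line)
            if match:
                hp_size_kb = int(match.group('size_hp'))
                return int(math.ceil(hugepage_ram_kb / hp_size_kb))
    return 0


if __name__ == '__main__':
    # Example: a 10485760 kB (10 GB) request with 2048 kB hugepages yields 5120 pages.
    print(estimate_nr_hugepages(10485760))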