NSB: move interface probe to VNF, and attempt driver-only probe first
[yardstick.git] / yardstick / network_services / vnf_generic / vnf / prox_vnf.py
index 88911c3..2cdb3f9 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import errno
 import logging
-import multiprocessing
-import os
+import datetime
 import time
 
-from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper
-from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
+
+from yardstick.common.process import check_if_process_failed
 from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxDpdkVnfSetupEnvHelper
+from yardstick.network_services.vnf_generic.vnf.prox_helpers import ProxResourceHelper
 from yardstick.network_services.vnf_generic.vnf.sample_vnf import SampleVNF
+from yardstick.network_services.constants import PROCESS_JOIN_TIMEOUT
 
 LOG = logging.getLogger(__name__)
 
@@ -40,55 +42,34 @@ class ProxApproxVnf(SampleVNF):
         if resource_helper_type is None:
             resource_helper_type = ProxResourceHelper
 
+        self.prev_packets_in = 0
+        self.prev_packets_sent = 0
+        self.prev_time = time.time()
         super(ProxApproxVnf, self).__init__(name, vnfd, setup_env_helper_type,
                                             resource_helper_type)
-        self._result = {}
-        self._terminated = multiprocessing.Value('i', 0)
-        self._queue = multiprocessing.Value('i', 0)
-
-    def instantiate(self, scenario_cfg, context_cfg):
-        LOG.info("printing .........prox instantiate ")
-
-        self.scenario_helper.scenario_cfg = scenario_cfg
-
-        # this won't work we need 1GB hugepages at boot
-        self.setup_helper.setup_vnf_environment()
-
-        # self.connection.run("cat /proc/cpuinfo")
-
-        prox_args, prox_path, remote_path = self.resource_helper.get_process_args()
-
-        self.q_in = multiprocessing.Queue()
-        self.q_out = multiprocessing.Queue()
-        self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, "PROX started")
-        self._vnf_process = multiprocessing.Process(target=self._run_prox,
-                                                    args=(remote_path, prox_path, prox_args))
-        self._vnf_process.start()
 
    def _vnf_up_post(self):
        # PROX reported ready; delegate any post-startup work to the
        # PROX resource helper (exact actions are defined in up_post()).
        self.resource_helper.up_post()
 
-    def _run_prox(self, file_wrapper, config_path, prox_path, prox_args):
-        # This runs in a different process and should not share an SSH connection
-        # with the rest of the object
-        self.ssh_helper.drop_connection()
-
-        time.sleep(self.WAIT_TIME)
-
-        args = " ".join(" ".join([k, v if v else ""]) for k, v in prox_args.items())
-
-        cmd_template = "sudo bash -c 'cd {}; {} -o cli {} -f {} '"
-        prox_cmd = cmd_template.format(os.path.dirname(prox_path), prox_path, args, config_path)
-
-        LOG.debug(prox_cmd)
-        self.ssh_helper.run(prox_cmd, stdin=file_wrapper, stdout=file_wrapper,
-                            keep_stdin_open=True, pty=False)
-
-    def vnf_execute(self, cmd, wait_time=2):
+    def vnf_execute(self, cmd, *args, **kwargs):
         # try to execute with socket commands
-        self.resource_helper.execute(cmd)
+        # ignore socket errors, e.g. when using force_quit
+        ignore_errors = kwargs.pop("_ignore_errors", False)
+        try:
+            return self.resource_helper.execute(cmd, *args, **kwargs)
+        except OSError as e:
+            if e.errno in {errno.EPIPE, errno.ESHUTDOWN, errno.ECONNRESET}:
+                if ignore_errors:
+                    LOG.debug("ignoring vnf_execute exception %s for command %s", e, cmd)
+                else:
+                    raise
+            else:
+                raise
 
     def collect_kpi(self):
+        # we can't get KPIs if the VNF is down
+        check_if_process_failed(self._vnf_process)
+
         if self.resource_helper is None:
             result = {
                 "packets_in": 0,
@@ -98,20 +79,62 @@ class ProxApproxVnf(SampleVNF):
             }
             return result
 
-        if len(self.vnfd_helper.interfaces) not in {2, 4}:
+        # use all_ports so we only use ports matched in topology
+        port_count = len(self.vnfd_helper.port_pairs.all_ports)
+        if port_count not in {1, 2, 4}:
             raise RuntimeError("Failed ..Invalid no of ports .. "
-                               "2 or 4 ports only supported at this time")
+                               "1, 2 or 4 ports only supported at this time")
+
+        self.port_stats = self.vnf_execute('port_stats', range(port_count))
+        curr_time = time.time()
+        try:
+            rx_total = self.port_stats[6]
+            tx_total = self.port_stats[7]
+        except IndexError:
+            LOG.debug("port_stats parse fail ")
+            # return empty dict so we don't mess up existing KPIs
+            return {}
 
-        port_stats = self.resource_helper.execute('port_stats', self.vnfd_helper.interfaces)
-        rx_total = port_stats[6]
-        tx_total = port_stats[7]
         result = {
-            "packets_in": tx_total,
-            "packets_dropped": (tx_total - rx_total),
-            "packets_fwd": rx_total,
-            "collect_stats": self.resource_helper.collect_kpi(),
+            "packets_in": rx_total,
+            "packets_dropped": max((tx_total - rx_total), 0),
+            "packets_fwd": tx_total,
+            # we share ProxResourceHelper with TG, but we want to collect
+            # collectd KPIs here and not TG KPIs, so use a different method name
+            "collect_stats": self.resource_helper.collect_collectd_kpi(),
         }
+        curr_packets_in = int((rx_total - self.prev_packets_in) / (curr_time - self.prev_time))
+        curr_packets_fwd = int((tx_total - self.prev_packets_sent) / (curr_time - self.prev_time))
+
+        result["curr_packets_in"] = curr_packets_in
+        result["curr_packets_fwd"] = curr_packets_fwd
+
+        self.prev_packets_in = rx_total
+        self.prev_packets_sent = tx_total
+        self.prev_time = curr_time
+
+        LOG.debug("%s collect KPIs %s %s", self.APP_NAME, datetime.datetime.now(), result)
         return result
 
    def _tear_down(self):
        # NOTE(review): teardown is delegated entirely to the setup helper;
        # as the original comment says, this hook should either be
        # standardized for all VNFs or removed.
        self.setup_helper.tear_down()
+
+    def terminate(self):
+        # stop collectd first or we get pika errors?
+        self.resource_helper.stop_collect()
+        # try to quit with socket commands
+        # pkill is not matching, debug with pgrep
+        self.ssh_helper.execute("sudo pgrep -lax  %s" % self.setup_helper.APP_NAME)
+        self.ssh_helper.execute("sudo ps aux | grep -i %s" % self.setup_helper.APP_NAME)
+        if self._vnf_process.is_alive():
+            self.vnf_execute("stop_all")
+            self.vnf_execute("quit")
+            # hopefully quit succeeds and socket closes, so ignore force_quit socket errors
+            self.vnf_execute("force_quit", _ignore_errors=True)
+        self.setup_helper.kill_vnf()
+        self._tear_down()
+        if self._vnf_process is not None:
+            LOG.debug("joining before terminate %s", self._vnf_process.name)
+            self._vnf_process.join(PROCESS_JOIN_TIMEOUT)
+            self._vnf_process.terminate()