US3541 merge various fixes to OPNFV branch 67/52767/1
author yayogev <yaronyogev@gmail.com>
Tue, 27 Feb 2018 15:00:05 +0000 (17:00 +0200)
committer yayogev <yaronyogev@gmail.com>
Tue, 27 Feb 2018 15:00:05 +0000 (17:00 +0200)
timestamp of last commit that was merged: 26-Jan-2018 16:25.

Change-Id: I7b0bf7885d7d0badb81c794a52c480b905d78459
Signed-off-by: yayogev <yaronyogev@gmail.com>
59 files changed:
INFO
app/discover/clique_finder.py
app/discover/configuration.py
app/discover/events/event_base.py
app/discover/events/event_instance_add.py
app/discover/events/event_interface_add.py
app/discover/events/event_port_add.py
app/discover/events/event_port_delete.py
app/discover/events/event_router_add.py
app/discover/events/event_router_update.py
app/discover/events/event_subnet_add.py
app/discover/events/event_subnet_update.py
app/discover/fetcher.py
app/discover/fetchers/api/api_fetch_availability_zones.py
app/discover/fetchers/api/api_fetch_network.py
app/discover/fetchers/api/api_fetch_networks.py
app/discover/fetchers/api/api_fetch_port.py
app/discover/fetchers/api/api_fetch_ports.py
app/discover/fetchers/api/api_fetch_project_hosts.py
app/discover/fetchers/cli/cli_access.py
app/discover/fetchers/cli/cli_fetch_host_pnics.py
app/discover/fetchers/cli/cli_fetch_vconnectors_ovs.py
app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
app/discover/fetchers/db/db_fetch_oteps.py
app/discover/link_finders/find_links_for_vedges.py
app/discover/scan.py
app/discover/scan_manager.py
app/discover/scanner.py
app/install/calipso-installer.py
app/install/db/monitoring_config_templates.json
app/messages/message.py
app/monitoring/checks/check_instance_communictions.py
app/monitoring/checks/check_ping.py
app/monitoring/checks/check_pnic_ovs.py
app/monitoring/checks/check_pnic_vpp.py
app/monitoring/checks/check_vconnector.py
app/monitoring/checks/check_vedge_ovs.py
app/monitoring/checks/check_vedge_vpp.py
app/monitoring/checks/check_vnic_vconnector.py
app/monitoring/checks/check_vnic_vpp.py
app/monitoring/checks/check_vservice.py
app/monitoring/handlers/handle_otep.py
app/monitoring/handlers/monitoring_check_handler.py
app/monitoring/setup/monitoring_handler.py
app/test/api/responders_test/resource/test_clique_types.py
app/test/api/responders_test/test_data/clique_types.py
app/test/fetch/api_fetch/test_data/api_fetch_networks.py
app/test/fetch/api_fetch/test_data/api_fetch_ports.py
app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
app/test/scan/test_data/scanner.py
app/test/scan/test_scanner.py
app/test/verify.sh
app/utils/dict_naming_converter.py
app/utils/inventory_mgr.py
app/utils/logging/console_logger.py
app/utils/logging/full_logger.py
app/utils/logging/logger.py
app/utils/logging/message_logger.py
app/utils/logging/mongo_logging_handler.py

diff --git a/INFO b/INFO
index 83fdb69..3a83585 100644 (file)
--- a/INFO
+++ b/INFO
@@ -1,4 +1,4 @@
-Project: Virtual Infrastructure Network Assurance (Calipso)
+Project: Calipso - Virtual Infrastructure Networking Assurance
 Project Creation Date: March 21st 2017
 Project Category:
 Lifecycle State:
index 4d68eb4..4e04e7e 100644 (file)
@@ -42,43 +42,74 @@ class CliqueFinder(Fetcher):
         return self.links.find({'target': db_id})
 
     def find_cliques(self):
-        self.log.info("scanning for cliques")
+        self.log.info("Scanning for cliques")
         clique_types = self.get_clique_types().values()
         for clique_type in clique_types:
             self.find_cliques_for_type(clique_type)
-        self.log.info("finished scanning for cliques")
+        self.log.info("Finished scanning for cliques")
 
     # Calculate priority score for clique type per environment and configuration
-    def _get_priority_score(self, clique_type):
+    def get_priority_score(self, clique_type):
         # environment-specific clique type takes precedence
-        if self.env == clique_type['environment']:
-            return 16
-        if (self.env_config['distribution'] == clique_type.get('distribution')
-            and
-            self.env_config['distribution_version'] ==
-                clique_type.get('distribution_version')):
-            return 8
-        if clique_type.get('mechanism_drivers') \
-                in self.env_config['mechanism_drivers']:
-            return 4
-        if self.env_config['type_drivers'] == clique_type.get('type_drivers'):
-            return 2
-        if clique_type.get('environment', '') == 'ANY':
-            # environment=ANY serves as fallback option, but it's not mandatory
-            return 1
-        else:
+        env = clique_type.get('environment')
+        config = self.env_config
+        # ECT - Clique Type with Environment name
+        if env:
+            if self.env == env:
+                return 2**6
+            if env == 'ANY':
+                # environment=ANY serves as fallback option
+                return 2**0
             return 0
+        # NECT - Clique Type without Environment name
+        else:
+            env_type = clique_type.get('environment_type')
+            # TODO: remove backward compatibility ('if not env_type' check)
+            if env_type and env_type != config.get('environment_type'):
+                return 0
+
+            score = 0
+
+            distribution = clique_type.get('distribution')
+            if distribution:
+                if config['distribution'] != distribution:
+                    return 0
+
+                score += 2**5
+
+                dv = clique_type.get('distribution_version')
+                if dv:
+                    if dv != config['distribution_version']:
+                        return 0
+                    score += 2**4
+
+            mechanism_drivers = clique_type.get('mechanism_drivers')
+            if mechanism_drivers:
+                if mechanism_drivers not in config['mechanism_drivers']:
+                    return 0
+                score += 2**3
+
+            type_drivers = clique_type.get('type_drivers')
+            if type_drivers:
+                if type_drivers != config['type_drivers']:
+                    return 0
+                score += 2**2
+
+            # If no configuration is specified, this clique type
+            # is a fallback for its environment type
+            return max(score, 2**1)
 
     # Get clique type with max priority
     # for given focal point type
     def _get_clique_type(self, clique_types):
-        scored_clique_types = [{'score': self._get_priority_score(clique_type),
+        scored_clique_types = [{'score': self.get_priority_score(clique_type),
                                 'clique_type': clique_type}
                                for clique_type in clique_types]
         max_score = max(scored_clique_types, key=lambda t: t['score'])
         if max_score['score'] == 0:
-            self.log.warn('No matching clique types for focal point type: {}'
-                          .format(clique_types[0].get('focal_point_type')))
+            self.log.warn('No matching clique types '
+                          'for focal point type: {fp_type}'
+                          .format(fp_type=clique_types[0].get('focal_point_type')))
             return None
         return max_score.get('clique_type')
 
@@ -143,8 +174,9 @@ class CliqueFinder(Fetcher):
             clique["constraints"][c] = val
         allow_implicit = clique_type.get('use_implicit_links', False)
         for link_type in clique_type["link_types"]:
-            self.check_link_type(clique, link_type, nodes_of_type,
-                                 allow_implicit=allow_implicit)
+            if not self.check_link_type(clique, link_type, nodes_of_type,
+                                        allow_implicit=allow_implicit):
+                break
 
         # after adding the links to the clique, create/update the clique
         if not clique["links"]:
@@ -197,7 +229,7 @@ class CliqueFinder(Fetcher):
         return CliqueFinder.link_type_reversed.get(link_type)
 
     def check_link_type(self, clique, link_type, nodes_of_type,
-                        allow_implicit=False):
+                        allow_implicit=False) -> bool:
         # check if it's backwards
         link_type_reversed = self.get_link_type_reversed(link_type)
         # handle case of links like T<-->T
@@ -213,15 +245,16 @@ class CliqueFinder(Fetcher):
             matches = self.links.find_one(link_search_condition)
             use_reversed = True if matches else False
         if self_linked or not use_reversed:
-            self.check_link_type_forward(clique, link_type, nodes_of_type,
-                                         allow_implicit=allow_implicit)
+            return self.check_link_type_forward(clique, link_type,
+                                                nodes_of_type,
+                                                allow_implicit=allow_implicit)
         if self_linked or use_reversed:
-            self.check_link_type_back(clique, link_type, nodes_of_type,
-                                      allow_implicit=allow_implicit)
+            return self.check_link_type_back(clique, link_type, nodes_of_type,
+                                             allow_implicit=allow_implicit)
 
     def check_link_type_for_direction(self, clique, link_type, nodes_of_type,
                                       is_reversed=False,
-                                      allow_implicit=False):
+                                      allow_implicit=False) -> bool:
         if is_reversed:
             link_type = self.get_link_type_reversed(link_type)
         from_type = link_type[:link_type.index("-")]
@@ -230,7 +263,7 @@ class CliqueFinder(Fetcher):
         other_side = 'target' if not is_reversed else 'source'
         match_type = to_type if is_reversed else from_type
         if match_type not in nodes_of_type.keys():
-            return
+            return False
         other_side_type = to_type if not is_reversed else from_type
         nodes_to_add = set()
         for match_point in nodes_of_type[match_type]:
@@ -245,6 +278,7 @@ class CliqueFinder(Fetcher):
             nodes_of_type[other_side_type] = set()
         nodes_of_type[other_side_type] = \
             nodes_of_type[other_side_type] | nodes_to_add
+        return len(nodes_to_add) > 0
 
     def find_matches_for_point(self, match_point, clique, link_type,
                                side_to_match, other_side,
@@ -271,13 +305,15 @@ class CliqueFinder(Fetcher):
         return nodes_to_add
 
     def check_link_type_forward(self, clique, link_type, nodes_of_type,
-                                allow_implicit=False):
-        self.check_link_type_for_direction(clique, link_type, nodes_of_type,
-                                           is_reversed=False,
-                                           allow_implicit=allow_implicit)
+                                allow_implicit=False) -> bool:
+        return self.check_link_type_for_direction(clique, link_type,
+                                                  nodes_of_type,
+                                                  is_reversed=False,
+                                                  allow_implicit=allow_implicit)
 
     def check_link_type_back(self, clique, link_type, nodes_of_type,
-                             allow_implicit=False):
-        self.check_link_type_for_direction(clique, link_type, nodes_of_type,
-                                           is_reversed=True,
-                                           allow_implicit=allow_implicit)
+                             allow_implicit=False) -> bool:
+        return self.check_link_type_for_direction(clique, link_type,
+                                                  nodes_of_type,
+                                                  is_reversed=True,
+                                                  allow_implicit=allow_implicit)
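
A minimal, illustrative ranking under the reworked get_priority_score() earlier in this file; the score values follow the new scheme, but the clique-type documents below are made up:

    # An exact environment-name match (2**6) always outranks any combination of
    # configuration matches, whose scores sum to at most 2**5+2**4+2**3+2**2 = 60.
    scored_clique_types = [
        {'score': 2 ** 6, 'clique_type': {'environment': 'my-env'}},
        {'score': 2 ** 5 + 2 ** 4,
         'clique_type': {'distribution': 'Mirantis',
                         'distribution_version': '9.1'}},
        {'score': 2 ** 1, 'clique_type': {}},                      # configuration-less fallback
        {'score': 2 ** 0, 'clique_type': {'environment': 'ANY'}},  # environment=ANY fallback
    ]
    best = max(scored_clique_types, key=lambda t: t['score'])
    assert best['clique_type'] == {'environment': 'my-env'}
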
index c7bc0c0..9ec8f96 100644 (file)
@@ -47,6 +47,10 @@ class Configuration(metaclass=Singleton):
     def get_env_name(self):
         return self.env_name
 
+    def get_env_type(self):
+        return 'OpenStack' if 'environment_type' not in self.environment \
+            else self.environment['environment_type']
+
     def update_env(self, values):
         self.collection.update_one({"name": self.env_name},
                                    {'$set': MongoAccess.encode_mongo_keys(values)})
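
The new Configuration.get_env_type() defaults to OpenStack for environment documents created before the environment_type field existed; a standalone sketch of the same behaviour (the dicts are illustrative):

    def get_env_type(environment):
        # equivalent to the new Configuration.get_env_type()
        return environment.get('environment_type', 'OpenStack')

    assert get_env_type({'environment_type': 'Kubernetes'}) == 'Kubernetes'
    assert get_env_type({'name': 'legacy-env'}) == 'OpenStack'
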
index 6b3b290..4b466e1 100644 (file)
@@ -11,6 +11,7 @@ from abc import abstractmethod, ABC
 
 from discover.fetcher import Fetcher
 from utils.inventory_mgr import InventoryMgr
+from utils.origins import ScanOrigin, ScanOrigins
 
 
 class EventResult:
@@ -23,6 +24,8 @@ class EventResult:
         self.message = message
         self.related_object = related_object
         self.display_context = display_context
+        self.origin = ScanOrigin(origin_id=None,
+                                 origin_type=ScanOrigins.EVENT)
 
 
 class EventBase(Fetcher, ABC):
index 4dd2b20..a8717a5 100644 (file)
@@ -25,7 +25,7 @@ class EventInstanceAdd(EventBase):
 
         # scan instance
         scanner = Scanner()
-        scanner.set_env(env)
+        scanner.setup(env=env, origin=self.origin)
         scanner.scan("ScanInstancesRoot", instances_root,
                      limit_to_child_id=instance_id,
                      limit_to_child_type='instance')
index e54bedb..f0ba569 100644 (file)
@@ -30,7 +30,7 @@ class EventInterfaceAdd(EventBase):
 
     def add_gateway_port(self, env, project, network_name, router_doc, host_id):
         fetcher = CliFetchHostVservice()
-        fetcher.set_env(env)
+        fetcher.setup(env=env, origin=self.origin)
         router_id = router_doc['id']
         router = fetcher.get_vservice(host_id, router_id)
         device_id = decode_router_id(router_id)
@@ -101,7 +101,7 @@ class EventInterfaceAdd(EventBase):
         # add router-interface port document.
         if not ApiAccess.regions:
             fetcher = ApiFetchRegions()
-            fetcher.set_env(env)
+            fetcher.setup(env=env, origin=self.origin)
             fetcher.get(project_id)
         port_doc = EventSubnetAdd().add_port_document(env, port_id,
                                                       network_name=network_name)
@@ -134,7 +134,7 @@ class EventInterfaceAdd(EventBase):
         # update vservice-vnic, vnic-network,
         FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
         scanner = Scanner()
-        scanner.set_env(env)
+        scanner.setup(env=env, origin=self.origin)
 
         scanner.scan_cliques()
         self.log.info("Finished router-interface added.")
index 9220015..e03db34 100644 (file)
@@ -168,7 +168,7 @@ class EventPortAdd(EventBase):
                     "router": ('Gateways', router_name)}
 
         fetcher = CliFetchVserviceVnics()
-        fetcher.set_env(env)
+        fetcher.setup(env=env, origin=self.origin)
         namespace = 'q{}-{}'.format(object_type, object_id)
         vnic_documents = fetcher.handle_service(host['id'], namespace, enable_cache=False)
         if not vnic_documents:
@@ -258,7 +258,7 @@ class EventPortAdd(EventBase):
 
             # update instance
             instance_fetcher = ApiFetchHostInstances()
-            instance_fetcher.set_env(env)
+            instance_fetcher.setup(env=env, origin=self.origin)
             instance_docs = instance_fetcher.get(host_id + '-')
             instance = next(filter(lambda i: i['id'] == instance_id, instance_docs), None)
 
@@ -278,7 +278,7 @@ class EventPortAdd(EventBase):
                 # set ovs as default type.
                 vnic_fetcher = CliFetchInstanceVnics()
 
-            vnic_fetcher.set_env(env)
+            vnic_fetcher.setup(env=env, origin=self.origin)
             vnic_docs = vnic_fetcher.get(instance_id + '-')
             vnic = next(filter(lambda vnic: vnic['mac_address'] == mac_address, vnic_docs), None)
 
@@ -298,7 +298,7 @@ class EventPortAdd(EventBase):
             for fetcher in fetchers_implementing_add_links:
                 fetcher.add_links()
             scanner = Scanner()
-            scanner.set_env(env)
+            scanner.setup(env=env, origin=self.origin)
             scanner.scan_cliques()
 
         port_document = self.inv.get_by_id(env, port['id'])
index 1e55870..937d8df 100644 (file)
@@ -61,7 +61,7 @@ class EventPortDelete(EventDeleteBase):
             # update instance mac address.
             if port_doc['mac_address'] == instance_doc['mac_address']:
                 instance_fetcher = ApiFetchHostInstances()
-                instance_fetcher.set_env(env)
+                instance_fetcher.setup(env=env, origin=self.origin)
                 host_id = port_doc['binding:host_id']
                 instance_id = port_doc['device_id']
                 instance_docs = instance_fetcher.get(host_id + '-')
index 1fb2244..0f8bc05 100644 (file)
@@ -100,7 +100,7 @@ class EventRouterAdd(EventBase):
         host = self.inv.get_by_id(env, host_id)
 
         fetcher = CliFetchHostVservice()
-        fetcher.set_env(env)
+        fetcher.setup(env=env, origin=self.origin)
         router_doc = fetcher.get_vservice(host_id, router_id)
         gateway_info = router['external_gateway_info']
 
@@ -114,7 +114,7 @@ class EventRouterAdd(EventBase):
         # scan links and cliques
         FindLinksForVserviceVnics().add_links(search={"parent_id": router_id})
         scanner = Scanner()
-        scanner.set_env(env)
+        scanner.setup(env=env, origin=self.origin)
         scanner.scan_cliques()
         self.log.info("Finished router added.")
 
index b63b224..f20f07e 100644 (file)
@@ -60,7 +60,7 @@ class EventRouterUpdate(EventBase):
 
             # add gw_port_id info and port document.
             fetcher = CliFetchHostVservice()
-            fetcher.set_env(env)
+            fetcher.setup(env=env, origin=self.origin)
             router_vservice = fetcher.get_vservice(host_id, router_full_id)
             if router_vservice.get('gw_port_id'):
                 router_doc['gw_port_id'] = router_vservice['gw_port_id']
@@ -74,7 +74,7 @@ class EventRouterUpdate(EventBase):
 
         # update the cliques.
         scanner = Scanner()
-        scanner.set_env(env)
+        scanner.setup(env=env, origin=self.origin)
         scanner.scan_cliques()
         self.log.info("Finished router update.")
         return EventResult(result=True,
index 4126e0c..0a91803 100644 (file)
@@ -29,7 +29,7 @@ class EventSubnetAdd(EventBase):
         # document does not has project attribute. In this case, network_name should not be provided.
 
         fetcher = ApiFetchPort()
-        fetcher.set_env(env)
+        fetcher.setup(env=env, origin=self.origin)
         ports = fetcher.get(port_id)
 
         if ports:
@@ -133,7 +133,7 @@ class EventSubnetAdd(EventBase):
             # update network
             if not ApiAccess.regions:
                 fetcher = ApiFetchRegions()
-                fetcher.set_env(env)
+                fetcher.setup(env=env, origin=self.origin)
                 fetcher.get(project_id)
 
             self.log.info("add new subnet.")
@@ -146,7 +146,7 @@ class EventSubnetAdd(EventBase):
         FindLinksForVserviceVnics().add_links(search={"parent_id": "qdhcp-%s-vnics" % network_id})
 
         scanner = Scanner()
-        scanner.set_env(env)
+        scanner.setup(env=env, origin=self.origin)
         scanner.scan_cliques()
         self.log.info("Finished subnet added.")
         return EventResult(result=True,
index 59b0afb..2c58e70 100644 (file)
@@ -50,7 +50,7 @@ class EventSubnetUpdate(EventBase):
                 # make sure that self.regions is not empty.
                 if not ApiAccess.regions:
                     fetcher = ApiFetchRegions()
-                    fetcher.set_env(env)
+                    fetcher.setup(env=env, origin=self.origin)
                     fetcher.get(project_id)
 
                 self.log.info("add port binding to DHCP server.")
@@ -69,12 +69,12 @@ class EventSubnetUpdate(EventBase):
                     # add link for vservice - vnic
                     FindLinksForVserviceVnics().add_links(search={"id": "qdhcp-%s" % network_id})
                     scanner = Scanner()
-                    scanner.set_env(env)
+                    scanner.setup(env=env, origin=self.origin)
                     scanner.scan_cliques()
                     FindLinksForVserviceVnics(). \
                         add_links(search={"id": "qdhcp-%s" % network_id})
                     scanner = Scanner()
-                    scanner.set_env(env)
+                    scanner.setup(env=env, origin=self.origin)
                     scanner.scan_cliques()
 
             if subnet['enable_dhcp'] is False and subnets[key]['enable_dhcp']:
index 8d7fdbb..707cd60 100644 (file)
@@ -8,16 +8,21 @@
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
 ###############################################################################
 from discover.configuration import Configuration
+from utils.origins import Origin
 from utils.logging.full_logger import FullLogger
 
 
 class Fetcher:
 
+    ENV_TYPE_KUBERNETES = 'Kubernetes'
+    ENV_TYPE_OPENSTACK = 'OpenStack'
+
     def __init__(self):
         super().__init__()
         self.env = None
         self.log = FullLogger()
         self.configuration = None
+        self.origin = None
 
     @staticmethod
     def escape(string):
@@ -25,11 +30,55 @@ class Fetcher:
 
     def set_env(self, env):
         self.env = env
-        self.log.set_env(env)
+        self.log.setup(env=env)
         self.configuration = Configuration()
 
+    def setup(self, env, origin: Origin = None):
+        self.set_env(env=env)
+        if origin:
+            self.origin = origin
+            self.log.setup(origin=origin)
+
     def get_env(self):
         return self.env
 
     def get(self, object_id):
         return None
+
+    def set_folder_parent(self,
+                          o: dict,
+                          object_type: str =None,
+                          master_parent_type: str =None,
+                          master_parent_id: str =None,
+                          parent_objects_name=None,
+                          parent_type: str =None,
+                          parent_id: str =None,
+                          parent_text: str =None):
+        if object_type:
+            o['type'] = object_type
+            if not parent_objects_name:
+                parent_objects_name = '{}s'.format(object_type)
+        if not master_parent_type:
+            self.log.error('set_folder_parent: must specify: '
+                           'master_parent_type, master_parent_id, '
+                           'parent_type', 'parent_id')
+            return
+        if not parent_objects_name and not parent_type:
+            self.log.error('set_folder_parent: must specify: '
+                           'either parent_objects_name (e.g. "vedges") '
+                           'or parent_type and parent_id')
+            return
+        if parent_objects_name and not parent_type:
+            parent_type = '{}_folder'.format(parent_objects_name)
+        if parent_objects_name and not parent_id:
+            parent_id = '{}-{}'.format(master_parent_id, parent_objects_name)
+        o.update({
+            'master_parent_type': master_parent_type,
+            'master_parent_id': master_parent_id,
+            'parent_type': parent_type,
+            'parent_id': parent_id
+        })
+        if parent_text:
+            o['parent_text'] = parent_text
+        elif parent_objects_name:
+            o['parent_text'] = parent_objects_name.capitalize()
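
Fetchers that used to set the five parent/folder keys by hand can now call the shared helper; a sketch of the intended call pattern, assuming `fetcher` is an instance of any Fetcher subclass and using an illustrative availability-zone document:

    doc = {'id': 'region-1-az1', 'name': 'az1'}
    fetcher.set_folder_parent(doc, object_type='availability_zone',
                              master_parent_type='region',
                              master_parent_id='region-1',
                              parent_text='Availability Zones')
    # doc now also contains:
    #   type='availability_zone', master_parent_type='region',
    #   master_parent_id='region-1', parent_type='availability_zones_folder',
    #   parent_id='region-1-availability_zones', parent_text='Availability Zones'
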
index 196893b..ad9550e 100644 (file)
@@ -28,7 +28,7 @@ class ApiFetchAvailabilityZones(ApiAccess):
         # because the later does not inclde the "internal" zone in the results
         endpoint = self.get_region_url_nover(region, "nova")
         req_url = endpoint + "/v2/" + token["tenant"]["id"] + \
-                  "/os-availability-zone/detail"
+            "/os-availability-zone/detail"
         headers = {
             "X-Auth-Project-Id": project,
             "X-Auth-Token": token["id"]
@@ -45,11 +45,10 @@ class ApiFetchAvailabilityZones(ApiAccess):
         for doc in azs:
             doc["id"] = doc["zoneName"]
             doc["name"] = doc.pop("zoneName")
-            doc["master_parent_type"] = "region"
-            doc["master_parent_id"] = region
-            doc["parent_type"] = "availability_zones_folder"
-            doc["parent_id"] = region + "-availability_zones"
-            doc["parent_text"] = "Availability Zones"
+            self.set_folder_parent(doc, object_type="availability_zone",
+                                   master_parent_type="region",
+                                   master_parent_id=region,
+                                   parent_text="Availability Zones")
             doc["available"] = doc["zoneState"]["available"]
             doc.pop("zoneState")
             ret.append(doc)
index 889b8a5..b253773 100644 (file)
@@ -23,7 +23,8 @@ class ApiFetchNetwork(ApiAccess):
             return []
         ret = []
         for region in self.regions:
-            # TODO: refactor legacy code (Unresolved reference - self.get_for_region)
+            # TODO: refactor legacy code
+            # (Unresolved reference - self.get_for_region)
             ret.extend(self.get_for_region(region, token, project_id))
         return ret
 
@@ -37,7 +38,7 @@ class ApiFetchNetwork(ApiAccess):
             "X-Auth-Token": token["id"]
         }
         response = self.get_url(req_url, headers)
-        if not "network" in response:
+        if "network" not in response:
             return []
         network = response["network"]
         subnets = network['subnets']
@@ -60,13 +61,12 @@ class ApiFetchNetwork(ApiAccess):
         network["cidrs"] = cidrs
         network["subnet_ids"] = subnet_ids
 
-        network["master_parent_type"] = "project"
-        network["master_parent_id"] = network["tenant_id"]
-        network["parent_type"] = "networks_folder"
-        network["parent_id"] = network["tenant_id"] + "-networks"
-        network["parent_text"] = "Networks"
-        # set the 'network' attribute for network objects to the name of network,
-        # to allow setting constraint on network when creating network clique
+        self.set_folder_parent(network, object_type="network",
+                               master_parent_type="project",
+                               master_parent_id=network["tenant_id"])
+        # set the 'network' attribute for network objects to the name of
+        # network, to allow setting constraint on network when creating
+        # network clique
         network['network'] = network["id"]
         # get the project name
         project = self.inv.get_by_id(self.get_env(), network["tenant_id"])
index 4b70f65..f76517a 100644 (file)
@@ -34,7 +34,7 @@ class ApiFetchNetworks(ApiAccess):
             "X-Auth-Token": token["id"]
         }
         response = self.get_url(req_url, headers)
-        if not "networks" in response:
+        if "networks" not in response:
             return []
         networks = response["networks"]
         req_url = endpoint + "/v2.0/subnets"
@@ -46,7 +46,6 @@ class ApiFetchNetworks(ApiAccess):
             for s in subnets:
                 subnets_hash[s["id"]] = s
         for doc in networks:
-            doc["master_parent_type"] = "project"
             project_id = doc["tenant_id"]
             if not project_id:
                 # find project ID of admin project
@@ -57,12 +56,12 @@ class ApiFetchNetworks(ApiAccess):
                 if not project:
                     self.log.error("failed to find admin project in DB")
                 project_id = project["id"]
-            doc["master_parent_id"] = project_id
-            doc["parent_type"] = "networks_folder"
-            doc["parent_id"] = project_id + "-networks"
-            doc["parent_text"] = "Networks"
-            # set the 'network' attribute for network objects to the name of network,
-            # to allow setting constraint on network when creating network clique
+            self.set_folder_parent(doc, object_type='network',
+                                   master_parent_id=project_id,
+                                   master_parent_type='project')
+            # set the 'network' attribute for network objects to the name of
+            # network, to allow setting constraint on network when creating
+            # network clique
             doc['network'] = doc["id"]
             # get the project name
             project = self.inv.get_by_id(self.get_env(), project_id)
index f8d9eeb..8de1452 100644 (file)
@@ -43,11 +43,9 @@ class ApiFetchPort(ApiAccess):
             return []
 
         doc = response["port"]
-        doc["master_parent_type"] = "network"
-        doc["master_parent_id"] = doc["network_id"]
-        doc["parent_type"] = "ports_folder"
-        doc["parent_id"] = doc["network_id"] + "-ports"
-        doc["parent_text"] = "Ports"
+        self.set_folder_parent(doc, object_type="port",
+                               master_parent_type="network",
+                               master_parent_id=doc["network_id"])
         # get the project name
         net = self.inv.get_by_id(self.get_env(), doc["network_id"])
         if net:
index f4c54a6..5e44c1b 100644 (file)
@@ -38,11 +38,9 @@ class ApiFetchPorts(ApiAccess):
             return []
         ports = response["ports"]
         for doc in ports:
-            doc["master_parent_type"] = "network"
-            doc["master_parent_id"] = doc["network_id"]
-            doc["parent_type"] = "ports_folder"
-            doc["parent_id"] = doc["network_id"] + "-ports"
-            doc["parent_text"] = "Ports"
+            self.set_folder_parent(doc, object_type="port",
+                                   master_parent_type="network",
+                                   master_parent_id=doc["network_id"])
             # get the project name
             net = self.inv.get_by_id(self.get_env(), doc["network_id"])
             if net:
index 2aeb24f..1059600 100644 (file)
@@ -11,11 +11,11 @@ import json
 
 from discover.fetchers.api.api_access import ApiAccess
 from discover.fetchers.db.db_access import DbAccess
-from discover.fetchers.cli.cli_access import CliAccess
+from discover.fetchers.cli.cli_fetch_host_details import CliFetchHostDetails
 from utils.ssh_connection import SshError
 
 
-class ApiFetchProjectHosts(ApiAccess, DbAccess, CliAccess):
+class ApiFetchProjectHosts(ApiAccess, DbAccess, CliFetchHostDetails):
     def __init__(self):
         super(ApiFetchProjectHosts, self).__init__()
 
index c77b22a..68b81c8 100644 (file)
@@ -17,7 +17,7 @@ from utils.logging.console_logger import ConsoleLogger
 from utils.ssh_conn import SshConn
 
 
-class CliAccess(BinaryConverter, Fetcher):
+class CliAccess(Fetcher, BinaryConverter):
     connections = {}
     ssh_cmd = "ssh -q -o StrictHostKeyChecking=no "
     call_count_per_con = {}
@@ -71,8 +71,9 @@ class CliAccess(BinaryConverter, Fetcher):
         self.cached_commands[cmd_path] = {"timestamp": curr_time, "result": ret}
         return ret
 
-    def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True):
-        out = self.run(cmd, ssh_to_host, enable_cache)
+    def run_fetch_lines(self, cmd, ssh_to_host="", enable_cache=True,
+                        use_sudo=True):
+        out = self.run(cmd, ssh_to_host, enable_cache, use_sudo=use_sudo)
         if not out:
             return []
         # first try to split lines by whitespace
@@ -236,7 +237,7 @@ class CliAccess(BinaryConverter, Fetcher):
             self.find_matching_regexps(o, line, regexps)
         for regexp_tuple in regexps:
             name = regexp_tuple['name']
-            if 'name' not in o and 'default' in regexp_tuple:
+            if name not in o and 'default' in regexp_tuple:
                 o[name] = regexp_tuple['default']
 
     @staticmethod
@@ -247,4 +248,8 @@ class CliAccess(BinaryConverter, Fetcher):
             regex = re.compile(regex)
             matches = regex.search(line)
             if matches and name not in o:
-                o[name] = matches.group(1)
+                try:
+                    o[name] = matches.group(1)
+                except IndexError as e:
+                    self.log.error('failed to find group 1 in match, {}'
+                                   .format(str(regexp_tuple)))
index 26cd603..81d164d 100644 (file)
@@ -27,8 +27,8 @@ class CliFetchHostPnics(CliAccess):
              'description': 'IPv6 Address'}
         ]
 
-    def get(self, id):
-        host_id = id[:id.rindex("-")]
+    def get(self, parent_id):
+        host_id = parent_id[:parent_id.rindex("-")]
         cmd = 'ls -l /sys/class/net | grep ^l | grep -v "/virtual/"'
         host = self.inv.get_by_id(self.get_env(), host_id)
         if not host:
@@ -39,7 +39,8 @@ class CliFetchHostPnics(CliAccess):
                            ", host: " + str(host))
             return []
         host_types = host["host_type"]
-        if "Network" not in host_types and "Compute" not in host_types:
+        accepted_host_types = ['Network', 'Compute', 'Kube-node']
+        if not [t for t in accepted_host_types if t in host_types]:
             return []
         interface_lines = self.run_fetch_lines(cmd, host_id)
         interfaces = []
index ff37569..ac04568 100644 (file)
@@ -18,8 +18,8 @@ class CliFetchVconnectorsOvs(CliFetchVconnectors):
 
     def get_vconnectors(self, host):
         host_id = host['id']
-        lines = self.run_fetch_lines("brctl show", host_id)
-        headers = ["bridge_name", "bridge_id", "stp_enabled", "interfaces"]
+        lines = self.run_fetch_lines('brctl show', host_id)
+        headers = ['bridge_name', 'bridge_id', 'stp_enabled', 'interfaces']
         headers_count = len(headers)
         # since we hard-coded the headers list, remove the headers line
         del lines[:1]
@@ -31,26 +31,32 @@ class CliFetchVconnectorsOvs(CliFetchVconnectors):
         results = self.parse_cmd_result_with_whitespace(fixed_lines, headers, False)
         ret = []
         for doc in results:
-            doc["name"] = doc.pop("bridge_name")
-            doc["id"] = doc["name"] + "-" +  doc.pop("bridge_id")
-            doc["host"] = host_id
-            doc["connector_type"] = "bridge"
-            if "interfaces" in doc:
-                interfaces = {}
-                interface_names = doc["interfaces"].split(",")
-                for interface_name in interface_names:
-                    # find MAC address for this interface from ports list
-                    port_id_prefix = interface_name[3:]
-                    port = self.inv.find_items({
-                        "environment": self.get_env(),
-                        "type": "port",
-                        "binding:host_id": host_id,
-                        "id": {"$regex": r"^" + re.escape(port_id_prefix)}
-                    }, get_single=True)
-                    mac_address = '' if not port else port['mac_address']
-                    interface = {'name': interface_name, 'mac_address': mac_address}
-                    interfaces[interface_name] = interface
-                doc["interfaces"] = interfaces
-                doc['interfaces_names'] = list(interfaces.keys())
-                ret.append(doc)
+            doc['name'] = '{}-{}'.format(host_id, doc['bridge_name'])
+            doc['id'] = '{}-{}'.format(doc['name'], doc.pop('bridge_id'))
+            doc['host'] = host_id
+            doc['connector_type'] = 'bridge'
+            self.get_vconnector_interfaces(doc, host_id)
+            ret.append(doc)
         return ret
+
+    def get_vconnector_interfaces(self, doc, host_id):
+        if 'interfaces' not in doc:
+            doc['interfaces'] = {}
+            doc['interfaces_names'] = []
+            return
+        interfaces = {}
+        interface_names = doc['interfaces'].split(',')
+        for interface_name in interface_names:
+            # find MAC address for this interface from ports list
+            port_id_prefix = interface_name[3:]
+            port = self.inv.find_items({
+                'environment': self.get_env(),
+                'type': 'port',
+                'binding:host_id': host_id,
+                'id': {'$regex': r'^' + re.escape(port_id_prefix)}
+            }, get_single=True)
+            mac_address = '' if not port else port['mac_address']
+            interface = {'name': interface_name, 'mac_address': mac_address}
+            interfaces[interface_name] = interface
+        doc['interfaces'] = interfaces
+        doc['interfaces_names'] = list(interfaces.keys())
index 3bc3a5b..0129d3b 100644 (file)
@@ -66,17 +66,15 @@ class CliFetchVserviceVnics(CliAccess):
                     master_parent_id = "{}-{}".format(host, service)
                     current = {
                         "id": host + "-" + name,
-                        "type": "vnic",
                         "vnic_type": "vservice_vnic",
                         "host": host,
                         "name": name,
-                        "master_parent_type": "vservice",
-                        "master_parent_id": master_parent_id,
-                        "parent_type": "vnics_folder",
-                        "parent_id": "{}-vnics".format(master_parent_id),
-                        "parent_text": "vNICs",
                         "lines": []
                     }
+                    self.set_folder_parent(current, object_type="vnic",
+                                           master_parent_type="vservice",
+                                           master_parent_id=master_parent_id,
+                                           parent_text="vNICs")
                     interfaces.append(current)
                     self.handle_line(current, line_remainder)
             else:
index 85376ed..7721136 100644 (file)
@@ -82,4 +82,4 @@ class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
                     interface = l.split(":")[1].strip()
 
         if vconnector:
-            doc["vconnector"] = vconnector
+            doc["vconnector"] = '{}-{}'.format(host_id, vconnector)
index f9719b4..afabdbe 100644 (file)
@@ -104,8 +104,6 @@ class FindLinksForVedges(FindLinks):
         if "pnic" in vedge:
             if pname != vedge["pnic"]:
                 return
-        elif self.configuration.has_network_plugin('VPP'):
-            pass
         pnic = self.inv.find_items({
             "environment": self.get_env(),
             "type": "host_pnic",
index 49f37ff..fb5e833 100755 (executable)
@@ -22,6 +22,7 @@ from discover.scan_error import ScanError
 from discover.scanner import Scanner
 from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
 from utils.constants import EnvironmentFeatures
+from utils.origins import ScanOrigin, ScanOrigins
 from utils.mongo_access import MongoAccess
 from utils.exceptions import ScanArgumentsError
 from utils.inventory_mgr import InventoryMgr
@@ -112,6 +113,7 @@ class ScanPlan:
 
 class ScanController(Fetcher):
     DEFAULTS = {
+        "_id": None,
         "env": "",
         "mongo_config": "",
         "type": "",
@@ -126,7 +128,8 @@ class ScanController(Fetcher):
         "cliques_only": False,
         "monitoring_setup_only": False,
         "clear": False,
-        "clear_all": False
+        "clear_all": False,
+        "scheduled": False
     }
 
     def __init__(self):
@@ -274,9 +277,13 @@ class ScanController(Fetcher):
         self.conf.use_env(env_name)
 
         # generate ScanObject Class and instance.
+        origin = ScanOrigin(origin_id=args['_id'],
+                            origin_type=ScanOrigins.SCHEDULED
+                                        if args["scheduled"]
+                                        else ScanOrigins.MANUAL)
         scanner = Scanner()
         scanner.log.set_loglevel(args['loglevel'])
-        scanner.set_env(env_name)
+        scanner.setup(env=env_name, origin=origin)
         scanner.found_errors[env_name] = False
 
         # decide what scanning operations to do
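
The new origin argument lets downstream loggers tell scheduled scans from manual ones; a short sketch of how the origin is derived from the request arguments (the argument values are made up; ScanOrigin and ScanOrigins come from utils.origins as imported above):

    from utils.origins import ScanOrigin, ScanOrigins

    args = {'_id': 'some-scan-request-id', 'scheduled': True}
    origin = ScanOrigin(origin_id=args['_id'],
                        origin_type=ScanOrigins.SCHEDULED if args['scheduled']
                        else ScanOrigins.MANUAL)
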
index 91dd06c..6e31bbd 100644 (file)
@@ -103,7 +103,8 @@ class ScanManager(Manager):
 
     def _build_scan_args(self, scan_request: dict):
         args = {
-            'mongo_config': self.args.mongo_config
+            'mongo_config': self.args.mongo_config,
+            'scheduled': True if scan_request.get('interval') else False
         }
 
         def set_arg(name_from: str, name_to: str = None):
@@ -113,6 +114,7 @@ class ScanManager(Manager):
             if val:
                 args[name_to] = val
 
+        set_arg("_id")
         set_arg("object_id", "id")
         set_arg("log_level", "loglevel")
         set_arg("environment", "env")
index 8aac40b..8d36baf 100644 (file)
@@ -10,6 +10,7 @@
 # base class for scanners
 
 import json
+
 import os
 import queue
 import traceback
@@ -27,9 +28,6 @@ from utils.ssh_connection import SshError
 
 class Scanner(Fetcher):
 
-    ENV_TYPE_OPENSTACK = 'OpenStack'
-    ENV_TYPE_KUBERNETES = 'Kubernetes'
-
     config = None
     environment = None
     env = None
@@ -92,11 +90,11 @@ class Scanner(Fetcher):
             else basic_cond
         if not env_cond:
             env_cond = basic_cond
-        if 'environment_type' not in env_cond:
+        if 'environment_type' not in env_cond.keys():
             env_cond.update(basic_cond)
         if not isinstance(env_cond, dict):
-            self.log.warn('illegal environment_condition given '
-                          'for type {}'.format(type_to_fetch['type']))
+            self.log.warn('Illegal environment_condition given '
+                          'for type {type}'.format(type=type_to_fetch['type']))
             return True
         conf = self.config.get_env_config()
         if 'environment_type' not in conf:
@@ -104,14 +102,24 @@ class Scanner(Fetcher):
         for attr, required_val in env_cond.items():
             if attr == "mechanism_drivers":
                 if "mechanism_drivers" not in conf:
-                    self.log.warn('illegal environment configuration: '
+                    self.log.warn('Illegal environment configuration: '
                                   'missing mechanism_drivers')
                     return False
                 if not isinstance(required_val, list):
                     required_val = [required_val]
-                return bool(set(required_val) & set(conf["mechanism_drivers"]))
-            elif attr not in conf or conf[attr] != required_val:
+                value_ok = bool(set(required_val) &
+                                set(conf["mechanism_drivers"]))
+                if not value_ok:
+                    return False
+            elif attr not in conf:
                 return False
+            else:
+                if isinstance(required_val, list):
+                    if conf[attr] not in required_val:
+                        return False
+                else:
+                    if conf[attr] != required_val:
+                        return False
         # no check failed
         return True
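
A condensed, self-contained restatement of the environment-condition matching after this change (the configuration and condition dicts are made up). The notable fixes: the mechanism_drivers comparison no longer returns immediately, so the remaining attributes are still checked, and list-valued conditions now work for attributes other than mechanism_drivers:

    def condition_matches(conf, env_cond):
        for attr, required in env_cond.items():
            if attr == 'mechanism_drivers':
                required = required if isinstance(required, list) else [required]
                if not set(required) & set(conf.get('mechanism_drivers', [])):
                    return False
            elif attr not in conf:
                return False
            elif isinstance(required, list):
                if conf[attr] not in required:
                    return False
            elif conf[attr] != required:
                return False
        return True

    assert condition_matches({'distribution': 'Mirantis',
                              'mechanism_drivers': ['OVS', 'VPP']},
                             {'distribution': ['Mirantis', 'Canonical'],
                              'mechanism_drivers': 'OVS'})
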
 
@@ -132,18 +140,20 @@ class Scanner(Fetcher):
         if not isinstance(fetcher, Fetcher):
             type_to_fetch['fetcher'] = fetcher()  # make it an instance
             fetcher = type_to_fetch["fetcher"]
-        fetcher.set_env(self.get_env())
+        fetcher.setup(env=self.get_env(), origin=self.origin)
 
         # get children_scanner instance
         children_scanner = type_to_fetch.get("children_scanner")
 
         escaped_id = fetcher.escape(str(obj_id)) if obj_id else obj_id
         self.log.info(
-            "scanning : type=%s, parent: (type=%s, name=%s, id=%s)",
-            type_to_fetch["type"],
-            parent.get('type', 'environment'),
-            parent.get('name', ''),
-            escaped_id)
+            "Scanning: type={type}, "
+            "parent: (type={parent_type}, "
+            "name={parent_name}, "
+            "id={parent_id})".format(type=type_to_fetch["type"],
+                                     parent_type=parent.get('type', 'environment'),
+                                     parent_name=parent.get('name', ''),
+                                     parent_id=escaped_id))
 
         # fetch OpenStack data from environment by CLI, API or MySQL
         # or physical devices data from ACI API
@@ -154,18 +164,21 @@ class Scanner(Fetcher):
             self.found_errors[self.get_env()] = True
             return []
         except Exception as e:
-            self.log.error("Error while scanning : " +
-                           "fetcher=%s, " +
-                           "type=%s, " +
-                           "parent: (type=%s, name=%s, id=%s), " +
-                           "error: %s",
-                           fetcher.__class__.__name__,
-                           type_to_fetch["type"],
-                           "environment" if "type" not in parent
-                           else parent["type"],
-                           "" if "name" not in parent else parent["name"],
-                           escaped_id,
-                           e)
+            self.log.error(
+                "Error while scanning: fetcher={fetcher}, type={type}, "
+                "parent: (type={parent_type}, name={parent_name}, "
+                "id={parent_id}), "
+                "error: {error}".format(fetcher=fetcher.__class__.__name__,
+                                        type=type_to_fetch["type"],
+                                        parent_type="environment"
+                                                    if "type" not in parent
+                                                    else parent["type"],
+                                        parent_name=""
+                                                    if "name" not in parent
+                                                    else parent["name"],
+                                        parent_id=escaped_id,
+                                        error=e))
+
             traceback.print_exc()
             raise ScanError(str(e))
 
@@ -232,14 +245,16 @@ class Scanner(Fetcher):
         self.log.info("Scan complete")
 
     def scan_links(self):
-        self.log.info("scanning for links")
+        self.log.info("Scanning for links")
         for fetcher in self.link_finders:
-            fetcher.set_env(self.get_env())
+            fetcher.setup(env=self.get_env(),
+                          origin=self.origin)
             fetcher.add_links()
 
     def scan_cliques(self):
         clique_scanner = CliqueFinder()
-        clique_scanner.set_env(self.get_env())
+        clique_scanner.setup(env=self.get_env(),
+                             origin=self.origin)
         clique_scanner.find_cliques()
 
     def deploy_monitoring_setup(self):
index 78bb927..ad2b4e0 100644 (file)
@@ -16,7 +16,12 @@ import dockerpycreds
 # note : not used, useful for docker api security if used
 import time
 import json
-
+import socket
+# by default, we want to use the docker0 interface ip address for inter-contatiner communications,
+# if hostname argument will not be provided as argument for the calipso-installer
+import os
+dockerip = os.popen('ip addr show docker0 | grep "\<inet\>" | awk \'{ print $2 }\' | awk -F "/" \'{ print $1 }\'')
+local_hostname = dockerip.read().replace("\n", "")
 
 C_MONGO_CONFIG = "/local_dir/calipso_mongo_access.conf"
 H_MONGO_CONFIG = "/home/calipso/calipso_mongo_access.conf"
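
The os.popen pipeline above reads the docker0 inet address to use as the default --hostname; a rough pure-Python equivalent, shown only as an untested sketch (interface name, output format and fallback address assumed):

    import subprocess
    out = subprocess.check_output(['ip', '-4', '-o', 'addr', 'show', 'docker0'],
                                  universal_newlines=True)
    # out looks like: "4: docker0    inet 172.17.0.1/16 brd ... scope global docker0"
    local_hostname = out.split()[3].split('/')[0] if out else '172.17.0.1'
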
@@ -161,7 +166,8 @@ def start_mongo(dbport, copy):
     copy_file("clique_types")
     copy_file("cliques")
     copy_file("constants")
-    copy_file("environments_config")
+    copy_file("environments_config"),
+    copy_file("environment_options"),
     copy_file("inventory")
     copy_file("link_types")
     copy_file("links")
@@ -328,10 +334,10 @@ def container_stop(container_name):
 # parser for getting optional command arguments:
 parser = argparse.ArgumentParser()
 parser.add_argument("--hostname",
-                    help="Hostname or IP address of the server "
-                         "(default=172.17.0.1)",
+                    help="FQDN (ex:mysrv.cisco.com) or IP address of the Server"
+                         "(default=docker0 interface ip address)",
                     type=str,
-                    default="172.17.0.1",
+                    default=local_hostname,
                     required=False)
 parser.add_argument("--webport",
                     help="Port for the Calipso WebUI "
@@ -401,6 +407,8 @@ parser.add_argument("--copy",
                     required=False)
 args = parser.parse_args()
 
+print("\nrunning installer against host:", args.hostname, "\n")
+
 if args.command == "start-all":
     container = "all"
     action = "start"
@@ -424,6 +432,7 @@ while container != "all" and container not in container_names:
     if container == "q":
         exit()
 
+
 # starting the containers per arguments:
 if action == "start":
     # building /home/calipso/calipso_mongo_access.conf and
index b5c47df..e0d59e7 100644 (file)
    "order" : "1",
    "condition" : {
        "mechanism_drivers" : [
-            "OVS"
+            "OVS",
+            "Flannel"
        ]
    },
    "config" : {
        "checks" : {
            "{objtype}_{objid}" : {
                "interval" : 15,
-                "command" : "check_vconnector_ovs.py {name}",
+                "command" : "check_vconnector.py {name}",
                "standalone" : true,
                "type": "metric",
                "subscribers" : [

index e940054..eeef329 100644 (file)
@@ -29,7 +29,8 @@ class Message:
                  object_type: str = None,
                  ts: datetime = None,
                  received_ts: datetime = None,
-                 finished_ts: datetime = None):
+                 finished_ts: datetime = None,
+                 **kwargs):
         super().__init__()
 
         if level and level.lower() in self.LEVELS:
@@ -48,6 +49,7 @@ class Message:
         self.received_timestamp = received_ts
         self.finished_timestamp = finished_ts
         self.viewed = False
+        self.extra = kwargs
 
     def get(self):
         return {
@@ -62,5 +64,6 @@ class Message:
             "timestamp": self.timestamp,
             "received_timestamp": self.received_timestamp,
             "finished_timestamp": self.finished_timestamp,
-            "viewed": self.viewed
+            "viewed": self.viewed,
+            **self.extra
         }
index d3a94b7..4ce5165 100644 (file)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
@@ -31,7 +31,7 @@ arp_mac_pos = arp_headers.index('HWaddress')
 arp_flags_pos = arp_headers.index('Flags')
 
 
-def check_vnic_tuple(vnic_and_service: str):
+def check_vnic_tuple(vnic_and_service):
     tuple_parts = vnic_and_service.split(',')
     local_service_id = tuple_parts[0]
     mac_address = tuple_parts[1]
index 35e7234..fbf1304 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index c26e42f..7cfa699 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
@@ -15,7 +15,7 @@ import subprocess
 from binary_converter import binary2str
 
 
-def nic_not_found(name: str, output: str):
+def nic_not_found(name, output):
     print("Error finding NIC {}{}{}\n".format(name, ': ' if output else '',
                                               output))
     return 2
index 942fdc2..3db4e49 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index 237a195..961d7ad 100644 (file)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index 849af66..33d3f71 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index 346feae..94d5977 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index 76efd0b..fc8721f 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index 0f77ddd..22cc31d 100755 (executable)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index a95a46a..2a30a53 100644 (file)
@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 ###############################################################################
 # Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
 # and others                                                                  #
index 0189625..b19baab 100644 (file)
@@ -29,7 +29,7 @@ class HandleOtep(MonitoringCheckHandler):
             self.log.error('Port not found: ' + port_id)
             return 1
         status = check_result['status']
-        port['status'] = self.STATUS_LABEL[status]
+        port['status'] = self.get_label_for_status(status)
         port['status_value'] = status
         port['status_text'] = check_result['output']
 
index c1f70fb..4902c3c 100644 (file)
@@ -19,14 +19,13 @@ from messages.message import Message
 from utils.inventory_mgr import InventoryMgr
 from utils.logging.full_logger import FullLogger
 from utils.special_char_converter import SpecialCharConverter
-from utils.string_utils import stringify_datetime
 
 SOURCE_SYSTEM = 'Sensu'
 ERROR_LEVEL = ['info', 'warn', 'error']
 
 
 class MonitoringCheckHandler(SpecialCharConverter):
-    STATUS_LABEL = ['OK', 'Warning', 'Error']
+    status_labels = {}
     TIME_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
 
     def __init__(self, args):
@@ -39,9 +38,30 @@ class MonitoringCheckHandler(SpecialCharConverter):
             self.inv = InventoryMgr()
             self.inv.log.set_loglevel(args.loglevel)
             self.inv.set_collections(args.inventory)
+            self.status_labels = self.get_status_labels()
         except FileNotFoundError:
             sys.exit(1)
 
+    def get_status_labels(self):
+        statuses_name_search = {'name': 'monitoring_check_statuses'}
+        labels_data = self.inv.find_one(search=statuses_name_search,
+                                        collection='constants')
+        if not isinstance(labels_data, dict) or 'data' not in labels_data:
+            return ''
+        labels = {}
+        for status_data in labels_data['data']:
+            if not isinstance(status_data, dict):
+                continue
+            val = int(status_data['value'])
+            label = status_data['label']
+            labels[val] = label
+        return labels
+
+    def get_label_for_status(self, status: int) -> str:
+        if status not in self.status_labels.keys():
+            return ''
+        return self.status_labels.get(status, '')
+
     def doc_by_id(self, object_id):
         doc = self.inv.get_by_id(self.env, object_id)
         if not doc:
@@ -57,7 +77,10 @@ class MonitoringCheckHandler(SpecialCharConverter):
         return doc
 
     def set_doc_status(self, doc, status, status_text, timestamp):
-        doc['status'] = self.STATUS_LABEL[status] if isinstance(status, int) \
+        doc['status_value'] = status if isinstance(status, int) \
+            else status
+        doc['status'] = self.get_label_for_status(status) \
+            if isinstance(status, int) \
             else status
         if status_text:
             doc['status_text'] = status_text
@@ -83,7 +106,8 @@ class MonitoringCheckHandler(SpecialCharConverter):
         obj_id = 'link_{}_{}'.format(doc['source_id'], doc['target_id']) \
             if is_link \
             else doc['id']
-        obj_type = 'link_{}'.format(doc['link_type']) if is_link else doc['type']
+        obj_type = 'link_{}'.format(doc['link_type']) if is_link else \
+            doc['type']
         display_context = obj_id if is_link \
             else doc['network_id'] if doc['type'] == 'port' else doc['id']
         level = error_level if error_level\
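For context, the new status-label lookup replaces the fixed STATUS_LABEL list with a 'monitoring_check_statuses' document read from the 'constants' collection. A minimal illustrative sketch of that mapping, using hypothetical sample data for the constants document (field names follow the hunk above, values are made up):

# Illustrative only: mirrors the label lookup added to MonitoringCheckHandler.
constants_doc = {
    'name': 'monitoring_check_statuses',
    'data': [
        {'value': '0', 'label': 'OK'},
        {'value': '1', 'label': 'Warning'},
        {'value': '2', 'label': 'Error'},
    ]
}

# Build the value -> label mapping the way get_status_labels() does
status_labels = {int(entry['value']): entry['label']
                 for entry in constants_doc['data']
                 if isinstance(entry, dict)}

def get_label_for_status(status: int) -> str:
    # unknown statuses map to an empty label rather than raising
    return status_labels.get(status, '')

assert get_label_for_status(1) == 'Warning'
assert get_label_for_status(9) == ''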
index 903b8d8..0eeb668 100644 (file)
@@ -105,13 +105,25 @@ class MonitoringHandler(MongoAccess, CliAccess, BinaryConverter):
         if 'condition' not in doc:
             return True
         condition = doc['condition']
-        if 'mechanism_drivers' not in condition:
-            return True
-        required_mechanism_drivers = condition['mechanism_drivers']
-        if not isinstance(required_mechanism_drivers, list):
-            required_mechanism_drivers = [required_mechanism_drivers]
-        intersection = [val for val in required_mechanism_drivers
-                        if val in self.mechanism_drivers]
+        if not isinstance(condition, dict):
+            self.log.error('incorrect condition in monitoring ({}): '
+                           'condition must be a dict'
+                           .format(doc.get('type', '')))
+            return False
+        for key, required_value in condition.items():
+            if not self.check_env_config(key, required_value):
+                return False
+        return True
+
+    def check_env_config(self, config_name, required_config_value):
+        required_config_values = required_config_value \
+            if isinstance(required_config_value, list) \
+            else [required_config_value]
+        conf_values = self.configuration.environment.get(config_name, [])
+        conf_values = conf_values if isinstance(conf_values, list) \
+            else [conf_values]
+        intersection = [val for val in required_config_values
+                        if val in conf_values]
         return bool(intersection)
 
     def content_replace(self, content):
@@ -435,6 +447,7 @@ class MonitoringHandler(MongoAccess, CliAccess, BinaryConverter):
                 if '/*' in local_dir else local_dir
             if local_dir_base.strip('/*') == remote_path.strip('/*'):
                 return  # same directory - nothing to do
+            self.make_remote_dir(host, remote_path)
             cmd = 'cp {} {}'.format(what_to_copy, remote_path)
             self.run(cmd, ssh=ssh)
             return
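The condition check is generalized from 'mechanism_drivers' only to any key of the environment configuration: each required value (scalar or list) must intersect the configured values. A standalone sketch of that matching rule, with made-up configuration values (not taken from the patch):

# Illustrative sketch of the generalized condition matching added above.
# 'environment' stands in for self.configuration.environment.
environment = {
    'mechanism_drivers': ['OVS'],
    'type_drivers': 'vxlan',
}

def check_env_config(config_name, required_value):
    required = required_value if isinstance(required_value, list) \
        else [required_value]
    configured = environment.get(config_name, [])
    configured = configured if isinstance(configured, list) else [configured]
    return bool([v for v in required if v in configured])

def should_install(doc):
    condition = doc.get('condition')
    if condition is None:
        return True
    if not isinstance(condition, dict):
        return False
    return all(check_env_config(k, v) for k, v in condition.items())

assert should_install({'condition': {'mechanism_drivers': 'OVS'}})
assert not should_install({'condition': {'type_drivers': ['vlan']}})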
index 702bc87..5e52cea 100644 (file)
@@ -12,7 +12,6 @@ import json
 from test.api.responders_test.test_data import base
 from test.api.test_base import TestBase
 from test.api.responders_test.test_data import clique_types
-import unittest
 from unittest.mock import patch
 
 
@@ -70,7 +69,6 @@ class TestCliqueTypes(TestBase):
             expected_code=base.BAD_REQUEST_CODE
         )
 
-    @unittest.SkipTest
     @patch(base.RESPONDER_BASE_READ)
     def test_get_clique_type_with_correct_configuration(self, read):
         self.validate_get_request(
@@ -272,7 +270,6 @@ class TestCliqueTypes(TestBase):
             expected_code=base.BAD_REQUEST_CODE
         )
 
-    @unittest.SkipTest
     @patch(base.RESPONDER_BASE_READ)
     def test_post_clique_type_with_duplicate_configuration(self, read):
         data = clique_types.CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION[0]
index 534b6f0..0791bdf 100644 (file)
@@ -60,15 +60,15 @@ CLIQUE_TYPES_WITH_SPECIFIC_ID = [
     get_payload(update={'id': CORRECT_ID})
 ]
 
-CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION = [
-    get_payload(update={'id': SAMPLE_IDS[0],
-                        **TEST_CONFIGURATION},
-                delete=['environment'])
-]
+CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION = [
+    get_payload(update={'id': SAMPLE_IDS[0],
+                        **TEST_CONFIGURATION},
+                delete=['environment'])
+]
 
-CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE = {
-    "clique_types": CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION
-}
+CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION_RESPONSE = {
+    "clique_types": CLIQUE_TYPES_WITH_SPECIFIC_CONFIGURATION
+}
 
 CLIQUE_TYPES_WITH_SPECIFIC_FOCAL_POINT_TYPE = [
     get_payload(update={'id': _id,
@@ -144,4 +144,4 @@ CLIQUE_TYPE_WITH_WRONG_MECH_DRIVERS = get_payload(
 
 CLIQUE_TYPE_WITH_WRONG_TYPE_DRIVERS = get_payload(
     update={'type_drivers': WRONG_TYPE_DRIVER}
-)
+)
\ No newline at end of file
index 5079a92..38c60a3 100644 (file)
@@ -21,6 +21,7 @@ NETWORKS_RESPONSE = {
 
 NETWORKS_RESULT = [
     {
+        "type": "network",
         "id": "8673c48a-f137-4497-b25d-08b7b218fd17",
         "subnets": {
             "test23":  {
index fc0552c..bb1d89f 100644 (file)
@@ -26,6 +26,7 @@ PORTS_RESULT_WITH_NET = [
         "name": "fa:16:3e:d7:c5:16",
         "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
         "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+        "type": "port",
         "master_parent_type": "network",
         "master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
         "parent_type": "ports_folder",
@@ -41,6 +42,7 @@ PORTS_RESULT_WITHOUT_NET = [
         "name": "16620a58-c48c-4195-b9c1-779a8ba2e6f8",
         "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
         "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+        "type": "port",
         "master_parent_type": "network",
         "master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
         "parent_type": "ports_folder",
@@ -56,6 +58,7 @@ PORTS_RESULT_WITH_PROJECT = [
         "name": "fa:16:3e:d7:c5:16",
         "network_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
         "tenant_id": "75c0eb79ff4a42b0ae4973c8375ddf40",
+        "type": "port",
         "master_parent_type": "network",
         "master_parent_id": "b6fd5175-4b22-4256-9b1a-9fc4b9dce1fe",
         "parent_type": "ports_folder",
index 6940c61..b0467a7 100644 (file)
@@ -128,5 +128,5 @@ OTEP_WITH_CONNECTOR = {
         "br-tun": {
         }
     },
-    "vconnector": "br-mesh"
+    "vconnector": "node-5.cisco.com-br-mesh"
 }
index 500021d..ed2129f 100644 (file)
@@ -10,7 +10,6 @@
 import queue
 from discover.fetchers.folder_fetcher import FolderFetcher
 
-
 SCANNER_TYPE_FOR_ENV = "ScanEnvironment"
 
 METADATA = {
@@ -64,7 +63,8 @@ TYPES_TO_FETCH = [
     },
     {
         "type": "network_services_folder",
-        "fetcher": FolderFetcher("network_services", "network", "Network vServices")
+        "fetcher": FolderFetcher("network_services", "network",
+                                 "Network vServices")
     }
 ]
 
@@ -162,9 +162,6 @@ TYPES_TO_FETCHES_FOR_SCAN_AGGREGATE = [{
     "fetcher": "DbFetchAggregateHosts"
 }]
 
-
-
-
 # id = 'RegionOne-aggregates'
 # obj = self.inv.get_by_id(self.env, id)
 obj = {'id': 'Mirantis-Liberty-Nvn'}
@@ -172,7 +169,6 @@ id_field = 'id'
 child_id = '',
 child_type = ''
 
-
 child_data = [
     {
         'id_path': '/Mirantis-Liberty-Nvn/Mirantis-Liberty-Nvn-regions',
@@ -191,15 +187,23 @@ child_data = [
 ]
 
 PARENT = {
-    "environment" : "Mirantis-Liberty-Xiaocong",
-    "id" : "node-6.cisco.com-vservices-dhcps",
-    "name" : "node-6.cisco.com-vservices-dhcps",
-    "object_name" : "DHCP servers",
-    "parent_id" : "node-6.cisco.com-vservices",
-    "parent_type" : "vservices_folder",
-    "show_in_tree" : True,
-    "text" : "DHCP servers",
-    "type" : "vservice_dhcps_folder"
+    "environment": "Mirantis-Liberty-Xiaocong",
+    "id": "node-6.cisco.com-vservices-dhcps",
+    "id_path": "/Mirantis-Liberty-Xiaocong/Mirantis-Liberty-Xiaocong-regions"
+               "/RegionOne/RegionOne-availability_zones"
+               "/internal/node-6.cisco.com"
+               "/node-6.cisco.com-vservices/node-6.cisco.com-vservices-dhcps",
+    "name": "node-6.cisco.com-vservices-dhcps",
+    "name_path": "/Mirantis-Liberty-Xiaocong/Regions"
+               "/RegionOne/Availability Zones"
+               "/internal/node-6.cisco.com"
+               "/vServices/DHCP servers",
+    "object_name": "DHCP servers",
+    "parent_id": "node-6.cisco.com-vservices",
+    "parent_type": "vservices_folder",
+    "show_in_tree": True,
+    "text": "DHCP servers",
+    "type": "vservice_dhcps_folder"
 }
 
 PARENT_WITHOUT_ID = {
@@ -285,7 +289,6 @@ DB_RESULTS_WITHOUT_MASTER_PARENT_IN_DB = [
     }
 ]
 
-
 DICTIONARY_DB_RESULTS = {
     "name": "Mirantis-Liberty-Xiaocong-regions",
     "parent_type": "environment",
@@ -296,18 +299,22 @@ DICTIONARY_DB_RESULTS = {
 }
 
 MASTER_PARENT = {
-    "create_object" : True,
-    "environment" : "Mirantis-Liberty-Xiaocong",
-    "id" : "node-6.cisco.com-vservices",
-    "id_path" : "/Mirantis-Liberty/Mirantis-Liberty-regions/RegionOne/RegionOne-availability_zones/internal/node-6.cisco.com/node-6.cisco.com-vservices",
-    "name" : "Vservices",
-    "name_path" : "/Mirantis-Liberty/Regions/RegionOne/Availability Zones/internal/node-6.cisco.com/Vservices",
-    "object_name" : "Vservices",
-    "parent_id" : "node-6.cisco.com",
-    "parent_type" : "host",
-    "show_in_tree" : True,
-    "text" : "Vservices",
-    "type" : "vservices_folder"
+    "create_object": True,
+    "environment": "Mirantis-Liberty-Xiaocong",
+    "id": "node-6.cisco.com-vservices",
+    "id_path": "/Mirantis-Liberty/Mirantis-Liberty-regions"
+               "/RegionOne/RegionOne-availability_zones"
+               "/internal/node-6.cisco.com/node-6.cisco.com-vservices",
+    "name": "Vservices",
+    "name_path": "/Mirantis-Liberty/Regions"
+                 "/RegionOne/Availability Zones"
+                 "/internal/node-6.cisco.com/Vservices",
+    "object_name": "Vservices",
+    "parent_id": "node-6.cisco.com",
+    "parent_type": "host",
+    "show_in_tree": True,
+    "text": "Vservices",
+    "type": "vservices_folder"
 }
 
 CONFIGURATIONS_WITHOUT_MECHANISM_DRIVERS = {
index e93a35b..bd1a0e3 100644 (file)
@@ -188,9 +188,11 @@ class TestScanner(TestScan):
 
         # store original method
         original_set = self.scanner.inv.set
+        original_get_by_id = self.scanner.inv.get_by_id
 
         # mock method
         self.scanner.inv.set = MagicMock()
+        self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
 
         self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
         self.assertIn("projects", DB_RESULTS_WITH_PROJECT[0],
@@ -199,43 +201,53 @@ class TestScanner(TestScan):
                          "Can't delete the project key in the object")
 
         self.scanner.inv.set = original_set
+        self.scanner.inv.get_by_id = original_get_by_id
 
     @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
     def test_scan_type_without_create_object(self, fetcher_get):
         fetcher_get.return_value = DB_RESULTS_WITHOUT_CREATE_OBJECT
 
         original_set = self.scanner.inv.set
+        original_get_by_id = self.scanner.inv.get_by_id
 
         self.scanner.inv.set = MagicMock()
+        self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
         self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
 
         self.assertEqual(self.scanner.inv.set.call_count, 0,
                          "Set the object when the create object is false")
 
         self.scanner.inv.set = original_set
+        self.scanner.inv.get_by_id = original_get_by_id
 
     @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
     def test_scan_type_with_create_object(self, fetcher_get):
         fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
 
         original_set = self.scanner.inv.set
+        original_get_by_id = self.scanner.inv.get_by_id
 
         self.scanner.inv.set = MagicMock()
+        self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
+
         self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
 
         self.assertEqual(self.scanner.inv.set.call_count, 1,
                          "Set the object when the create object is false")
 
         self.scanner.inv.set = original_set
+        self.scanner.inv.get_by_id = original_get_by_id
 
     @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
     def test_scan_type_with_children_scanner(self, fetcher_get):
         fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
 
         original_set = self.scanner.inv.set
+        original_get_by_id = self.scanner.inv.get_by_id
         original_queue_for_scan = self.scanner.queue_for_scan
 
         self.scanner.inv.set = MagicMock()
+        self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
         self.scanner.queue_for_scan = MagicMock()
 
         self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT, ID_FIELD)
@@ -244,6 +256,7 @@ class TestScanner(TestScan):
                          "Can't put children scanner in the queue")
 
         self.scanner.inv.set = original_set
+        self.scanner.inv.get_by_id = original_get_by_id
         self.scanner.queue_for_scan = original_queue_for_scan
 
     @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
@@ -251,9 +264,11 @@ class TestScanner(TestScan):
         fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
 
         original_set = self.scanner.inv.set
+        original_get_by_id = self.scanner.inv.get_by_id
         original_queue_for_scan = self.scanner.queue_for_scan
 
         self.scanner.inv.set = MagicMock()
+        self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
         self.scanner.queue_for_scan = MagicMock()
 
         self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENV_WITHOUT_CHILDREN_FETCHER,
@@ -263,6 +278,7 @@ class TestScanner(TestScan):
                          "Can't put children scanner in the queue")
 
         self.scanner.inv.set = original_set
+        self.scanner.inv.get_by_id = original_get_by_id
         self.scanner.queue_for_scan = original_queue_for_scan
 
     @patch("discover.fetchers.folder_fetcher.FolderFetcher.get")
@@ -270,9 +286,11 @@ class TestScanner(TestScan):
         fetcher_get.return_value = DB_RESULTS_WITH_CREATE_OBJECT
 
         original_set = self.scanner.inv.set
+        original_get_by_id = self.scanner.inv.get_by_id
         original_queue_for_scan = self.scanner.queue_for_scan
 
         self.scanner.inv.set = MagicMock()
+        self.scanner.inv.get_by_id = MagicMock(return_value=PARENT)
         self.scanner.queue_for_scan = MagicMock()
 
         result = self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT, PARENT,
@@ -281,6 +299,7 @@ class TestScanner(TestScan):
         self.assertNotEqual(result, [], "Can't get children form scan_type")
 
         self.scanner.inv.set = original_set
+        self.scanner.inv.get_by_id = original_get_by_id
         self.scanner.queue_for_scan = original_queue_for_scan
 
     def test_scan_with_limit_to_child_type(self):
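The tests above save the original inv.set / inv.get_by_id, swap in MagicMock objects, and restore them by hand in each test. As an alternative (not part of this patch), a hedged sketch using unittest.mock.patch.object as a context manager, which restores the attributes automatically; TestScanner internals (PARENT, TYPE_TO_FETCH_FOR_ENVIRONMENT, ID_FIELD) are assumed as in the hunks above:

from unittest.mock import MagicMock, patch

# Sketch only: 'self' is a TestScanner instance as in the tests above.
def test_scan_type_with_create_object_sketch(self):
    with patch.object(self.scanner.inv, 'set', MagicMock()) as mock_set, \
            patch.object(self.scanner.inv, 'get_by_id',
                         MagicMock(return_value=PARENT)):
        self.scanner.scan_type(TYPE_TO_FETCH_FOR_ENVIRONMENT,
                               PARENT, ID_FIELD)
        self.assertEqual(mock_set.call_count, 1)
    # patch.object restores inv.set and inv.get_by_id on exit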
index bda298c..684195e 100755 (executable)
@@ -7,10 +7,16 @@
 # are made available under the terms of the Apache License, Version 2.0       #
 # which accompanies this distribution, and is available at                    #
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
-##############################################################################
-set -o errexit
-set -o nounset
-set -o pipefail
+###############################################################################
+#set -o errexit
+#set -o nounset
+#set -o pipefail
+
+#sudo yum install -y https://centos7.iuscommunity.org/ius-release.rpm
+#sudo yum -y install python35
+#sudo pip install virtualenv
+#virtualenv -p $(which python3.5) $WORKSPACE/venv
+#. $WORKSPACE/venv/bin/activate
 
 PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/api
 PYTHONPATH=$PWD/app python3 -m unittest discover -s app/test/event_based_scan
index 91fea2e..d0f8d42 100644 (file)
@@ -8,6 +8,7 @@
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
 ###############################################################################
 from bson.objectid import ObjectId
+from datetime import datetime
 
 
 class DictNamingConverter:
@@ -20,21 +21,33 @@ class DictNamingConverter:
     # Returns:
     #     Dictionary with the new keys.
     @staticmethod
-    def change_dict_naming_convention(d, cf):
+    def change_dict_naming_convention(d, cf, level: int=0):
         new = {}
+        change_convention = DictNamingConverter.change_dict_naming_convention
         if not d:
             return d
-        if isinstance(d, str):
+        if isinstance(d, str) or isinstance(d, int) or isinstance(d, float) \
+                or isinstance(d, bool) or isinstance(d, datetime):
             return d
         if isinstance(d, ObjectId):
             return d
-        for k, v in d.items():
-            new_v = v
-            if isinstance(v, dict):
-                new_v = DictNamingConverter.change_dict_naming_convention(v, cf)
-            elif isinstance(v, list):
-                new_v = list()
-                for x in v:
-                    new_v.append(DictNamingConverter.change_dict_naming_convention(x, cf))
-            new[cf(k)] = new_v
+        if isinstance(d, object) and not isinstance(d, dict):
+            for k in dir(d):
+                if k.startswith('_'):
+                    continue
+                v = getattr(d, k)
+                if callable(v):
+                    continue
+                new[cf(k)] = change_convention(v, cf, level+1)
+        if isinstance(d, dict):
+            for k, v in d.items():
+                new_v = v
+                if isinstance(v, dict):
+                    new_v = change_convention(v, cf, level+1)
+                elif isinstance(v, list):
+                    new_v = list()
+                    for x in v:
+                        list_val = change_convention(x, cf, level+1)
+                        new_v.append(list_val)
+                new[cf(k)] = new_v
         return new
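change_dict_naming_convention now also descends into plain objects (via dir()) and passes primitives and datetime values through unchanged. A small usage sketch, assuming a simple snake_case-to-camelCase callback; the converter function and sample document below are hypothetical, not part of the patch:

from utils.dict_naming_converter import DictNamingConverter

# Hypothetical callback: snake_case -> camelCase
def to_camel(key):
    head, *rest = key.split('_')
    return head + ''.join(part.title() for part in rest)

doc = {
    'status_text': 'ok',
    'nested_items': [{'status_value': 0}],
}
converted = DictNamingConverter.change_dict_naming_convention(doc, to_camel)
# converted == {'statusText': 'ok', 'nestedItems': [{'statusValue': 0}]}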
index bbc5542..97b6cd4 100644 (file)
@@ -389,38 +389,9 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
         parent_id_path = parent.get("id_path", "/{}".format(environment))
         parent_name_path = parent.get("name_path", "/{}".format(environment))
 
-        try:
-            # case of dynamic folder added by need
-            master_parent_type = o["master_parent_type"]
-            master_parent_id = o["master_parent_id"]
-            master_parent = self.get_by_id(environment, master_parent_id)
-            if not master_parent:
-                self.log.error("failed to find master parent " +
-                               master_parent_id)
+        if 'master_parent_type' in o:
+            if not self.create_parent_folder(o, parent):
                 return False
-            folder_id_path = "/".join((master_parent["id_path"],
-                                       o["parent_id"]))
-            folder_name_path = "/".join((master_parent["name_path"],
-                                         o["parent_text"]))
-            folder = {
-                "environment": parent["environment"],
-                "parent_id": master_parent_id,
-                "parent_type": master_parent_type,
-                "id": o["parent_id"],
-                "id_path": folder_id_path,
-                "show_in_tree": True,
-                "name_path": folder_name_path,
-                "name": o["parent_id"],
-                "type": o["parent_type"],
-                "text": o["parent_text"]
-            }
-            # remove master_parent_type & master_parent_id after use,
-            # as they're there just ro help create the dynamic folder
-            o.pop("master_parent_type", True)
-            o.pop("master_parent_id", True)
-            self.set(folder)
-        except KeyError:
-            pass
 
         if o.get("text"):
             o["name"] = o["text"]
@@ -459,6 +430,42 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
         if "create_object" not in o or o["create_object"]:
             # add/update object in DB
             self.set(o)
-            if self.is_feature_supported(environment, EnvironmentFeatures.MONITORING):
+            if self.is_feature_supported(environment,
+                                         EnvironmentFeatures.MONITORING):
                 self.monitoring_setup_manager.create_setup(o)
         return True
+
+    def create_parent_folder(self, o, parent) -> bool:
+        # case of dynamic folder added by need
+        master_parent_type = o["master_parent_type"]
+        master_parent_id = o["master_parent_id"]
+        env_path = '/{}'.format(parent['environment'])
+        master_parent = {'id_path': env_path, 'name_path': env_path} \
+            if master_parent_type == 'environment' \
+            else self.get_by_id(o['environment'], master_parent_id)
+        if not master_parent:
+            self.log.error("failed to find master parent " +
+                           master_parent_id)
+            return False
+        folder_id_path = "/".join((master_parent['id_path'],
+                                   o["parent_id"]))
+        folder_name_path = "/".join((master_parent["name_path"],
+                                     o["parent_text"]))
+        folder = {
+            "environment": parent["environment"],
+            "parent_id": master_parent_id,
+            "parent_type": master_parent_type,
+            "id": o["parent_id"],
+            "id_path": folder_id_path,
+            "show_in_tree": True,
+            "name_path": folder_name_path,
+            "name": o["parent_id"],
+            "type": o["parent_type"],
+            "text": o["parent_text"]
+        }
+        # remove master_parent_type & master_parent_id after use,
+        # as they're there just to help create the dynamic folder
+        o.pop("master_parent_type", True)
+        o.pop("master_parent_id", True)
+        self.set(folder)
+        return True
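The dynamic-folder logic is now factored into create_parent_folder(), which also covers the case where the master parent is the environment itself (both paths fall back to '/<environment>'). A standalone sketch of how the folder paths are composed; all ids below are made up:

# Illustrative only: shows how folder id_path / name_path are derived
# in create_parent_folder().
master_parent = {
    'id_path': '/Env1/Env1-regions/RegionOne/node-1',
    'name_path': '/Env1/Regions/RegionOne/node-1',
}
obj = {
    'parent_id': 'node-1-vservices',
    'parent_type': 'vservices_folder',
    'parent_text': 'vServices',
}
folder_id_path = '/'.join((master_parent['id_path'], obj['parent_id']))
folder_name_path = '/'.join((master_parent['name_path'], obj['parent_text']))
# folder_id_path   == '/Env1/Env1-regions/RegionOne/node-1/node-1-vservices'
# folder_name_path == '/Env1/Regions/RegionOne/node-1/vServices'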
index bb8b2ed..b1008e4 100644 (file)
@@ -18,4 +18,3 @@ class ConsoleLogger(Logger):
         super().__init__(logger_name="{}-Console".format(self.PROJECT_NAME),
                          level=level)
         self.add_handler(logging.StreamHandler())
-
index 411eceb..f6fe5fa 100644 (file)
 import logging
 import logging.handlers
 
+from utils.origins import Origin
 from utils.logging.logger import Logger
 from utils.logging.mongo_logging_handler import MongoLoggingHandler
 
 
 class FullLogger(Logger):
 
-    def __init__(self, env: str = None, log_file: str = None,
-                 level: str = Logger.default_level):
+    def __init__(self, env: str = None, origin: Origin = None,
+                 log_file: str = None, level: str = Logger.default_level):
         super().__init__(logger_name="{}-Full".format(self.PROJECT_NAME),
                          level=level)
+        self.env = env
+        self.origin = origin
 
         # Console handler
         self.add_handler(logging.StreamHandler())
 
         # Message handler
-        self.add_handler(MongoLoggingHandler(env, self.level))
+        self.add_handler(MongoLoggingHandler(env=env, origin=origin,
+                                             level=self.level))
 
         # File handler
         if log_file:
             self.add_handler(logging.handlers.WatchedFileHandler(log_file))
 
+    def _get_message_handler(self):
+        defined_handlers = [h for h in self.log.handlers
+                            if isinstance(h, MongoLoggingHandler)]
+        return defined_handlers[0] if defined_handlers else None
+
     # Make sure we update MessageHandler with new env
     def set_env(self, env):
-        super().set_env(env)
+        self.env = env
 
-        defined_handler = [h for h in self.log.handlers
-                           if isinstance(h, MongoLoggingHandler)]
-        if defined_handler:
-            defined_handler[0].env = env
+        handler = self._get_message_handler()
+        if handler:
+            handler.env = env
         else:
             self.add_handler(MongoLoggingHandler(env, self.level))
+
+    def set_origin(self, origin: Origin):
+        self.origin = origin
+
+        handler = self._get_message_handler()
+        if handler:
+            handler.origin = origin
+        else:
+            self.add_handler(MongoLoggingHandler(env=self.env,
+                                                 level=self.level,
+                                                 origin=origin))
+
+    def setup(self, **kwargs):
+        env = kwargs.get('env')
+        if env and self.env != env:
+            self.set_env(env)
+
+        origin = kwargs.get('origin')
+        if origin and self.origin != origin:
+            self.set_origin(origin)
\ No newline at end of file
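FullLogger now carries an optional Origin and exposes setup(**kwargs) to switch env or origin at runtime, updating the MongoLoggingHandler in place instead of adding a second handler. A minimal usage sketch; the environment names are made up, and an Origin value would come from utils.origins:

from utils.logging.full_logger import FullLogger

# Sketch: a logger bound to one environment at construction time.
logger = FullLogger(env='Mirantis-Liberty')
logger.log.info('scan started')

# Later, repoint the same logger (and its Mongo handler) at another env;
# an Origin instance could be supplied the same way via origin=...
logger.setup(env='Mirantis-Ocata')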
index 316d3fd..9628040 100644 (file)
@@ -34,11 +34,12 @@ class Logger(ABC):
                             level=level)
         self.log.propagate = False
         self.set_loglevel(level)
-        self.env = None
         self.level = level
 
-    def set_env(self, env):
-        self.env = env
+    # Subclasses should override this method
+    # to perform runtime changes to handlers, etc.
+    def setup(self, **kwargs):
+        pass
 
     @staticmethod
     def check_level(level):
index 02e098f..d433a0f 100644 (file)
@@ -18,4 +18,18 @@ class MessageLogger(Logger):
     def __init__(self, env: str = None, level: str = None):
         super().__init__(logger_name="{}-Message".format(self.PROJECT_NAME),
                          level=level)
+        self.env = env
         self.add_handler(MongoLoggingHandler(env, self.level))
+
+    def set_env(self, env):
+        self.env = env
+
+        if self.log.handlers:
+            self.log.handlers[0].env = env
+        else:
+            self.add_handler(MongoLoggingHandler(env, self.level))
+
+    def setup(self, **kwargs):
+        env = kwargs.get('env')
+        if env and self.env != env:
+            self.set_env(env)
index ffb6f85..3929e02 100644 (file)
@@ -11,9 +11,9 @@ import datetime
 import logging
 
 from messages.message import Message
+from utils.origins import Origin
 from utils.inventory_mgr import InventoryMgr
 from utils.logging.logger import Logger
-from utils.string_utils import stringify_datetime
 
 
 class MongoLoggingHandler(logging.Handler):
@@ -22,11 +22,12 @@ class MongoLoggingHandler(logging.Handler):
     """
     SOURCE_SYSTEM = 'Calipso'
 
-    def __init__(self, env: str, level: str):
+    def __init__(self, env: str, level: str, origin: Origin = None):
         super().__init__(Logger.get_numeric_level(level))
         self.str_level = level
         self.env = env
         self.inv = None
+        self.origin = origin
 
     def emit(self, record):
         # Try to invoke InventoryMgr for logging
@@ -46,7 +47,22 @@ class MongoLoggingHandler(logging.Handler):
         d = now - datetime.datetime(1970, 1, 1)
         timestamp_id = '{}.{}.{}'.format(d.days, d.seconds, d.microseconds)
         source = self.SOURCE_SYSTEM
+
         message = Message(msg_id=timestamp_id, env=self.env, source=source,
                           msg=Logger.formatter.format(record), ts=now,
                           level=record.levelname)
+        if self.origin:
+            message.extra['origin_id'] = (
+                str(self.origin.origin_id)
+                if self.origin.origin_id
+                else None
+            )
+            message.extra['origin_type'] = (
+                self.origin.origin_type.value
+                if self.origin.origin_type
+                else None
+            )
+            for extra_field in self.origin.extra:
+                message.extra[extra_field] = getattr(self.origin, extra_field)
+
         self.inv.collections['messages'].insert_one(message.get())
\ No newline at end of file
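When an origin is set, its id, type and any declared extra fields are copied into the message's extra payload before the insert into the 'messages' collection. A minimal sketch of that enrichment step; StandInOrigin is a hypothetical stand-in that only mimics the attributes the handler reads:

# Sketch of the origin -> message.extra enrichment performed in emit().
class StandInOrigin:
    origin_id = '5a9d3c2e0000000000000001'   # made-up ObjectId string
    class origin_type:                        # mimics an Enum member
        value = 'scheduled_scan'
    extra = ['environment']
    environment = 'Mirantis-Liberty'

origin = StandInOrigin()
extra = {}
extra['origin_id'] = str(origin.origin_id) if origin.origin_id else None
extra['origin_type'] = origin.origin_type.value if origin.origin_type else None
for field in origin.extra:
    extra[field] = getattr(origin, field)
# extra == {'origin_id': '5a9d3c2e0000000000000001',
#           'origin_type': 'scheduled_scan',
#           'environment': 'Mirantis-Liberty'}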