release 1.0 calipso for opnfv apex 47/44047/1
author Koren Lev <korenlev@gmail.com>
Thu, 28 Sep 2017 22:38:18 +0000 (01:38 +0300)
committer Yaron Yogev <yaronyogev@gmail.com>
Tue, 3 Oct 2017 09:46:58 +0000 (09:46 +0000)
Change-Id: I3e63cd27c5f4d3756e67a07c749863a68e84dde2
Signed-off-by: Koren Lev <korenlev@gmail.com>
(cherry picked from commit d32f75145676bacefde0d08a14680a5984623451)

137 files changed:
app/api/auth/auth.py
app/api/responders/resource/environment_configs.py
app/config/events.json
app/config/link_finders.json
app/config/scanners.json
app/discover/event_manager.py
app/discover/events/event_interface_add.py
app/discover/events/event_interface_delete.py
app/discover/events/event_router_add.py
app/discover/events/event_router_delete.py
app/discover/events/event_router_update.py
app/discover/events/event_subnet_add.py
app/discover/events/event_subnet_update.py
app/discover/events/listeners/default_listener.py
app/discover/events/listeners/listener_base.py
app/discover/fetcher_new.py [deleted file]
app/discover/fetchers/cli/cli_access.py
app/discover/fetchers/cli/cli_fetch_bond_host_pnics.py [new file with mode: 0644]
app/discover/fetchers/cli/cli_fetch_host_pnics.py
app/discover/fetchers/cli/cli_fetch_host_vservice.py
app/discover/fetchers/cli/cli_fetch_host_vservices.py
app/discover/fetchers/cli/cli_fetch_instance_vnics_base.py
app/discover/fetchers/cli/cli_fetch_vservice_vnics.py
app/discover/fetchers/db/db_access.py
app/discover/fetchers/db/db_fetch_host_network_agents.py
app/discover/fetchers/db/db_fetch_oteps.py
app/discover/fetchers/db/db_fetch_vedges_ovs.py
app/discover/link_finders/__init__.py
app/discover/link_finders/find_links_for_pnics.py
app/discover/link_finders/find_links_for_vconnectors.py
app/discover/link_finders/find_links_for_vservice_vnics.py
app/discover/scanner.py
app/install/calipso-installer.py
app/install/configure/setup_apex_environment.py [new file with mode: 0644]
app/install/db/apex_environment_config.json [new file with mode: 0644]
app/install/db/api_tokens.json [new file with mode: 0644]
app/install/db/connection_tests.json [new file with mode: 0644]
app/install/db/constants.json
app/install/db/environments_config.json
app/install/db/supported_environments.json
app/install/db/user_settings.json [new file with mode: 0644]
app/install/ldap.conf.example
app/messages/message.py
app/monitoring/handlers/monitoring_check_handler.py
app/test/api/responders_test/resource/test_environment_configs.py
app/test/api/responders_test/test_data/base.py
app/test/api/responders_test/test_data/environment_configs.py
app/test/api/test_base.py
app/test/event_based_scan/test_interface_add.py
app/test/event_based_scan/test_interface_delete.py
app/test/event_based_scan/test_router_add.py
app/test/event_based_scan/test_router_update.py
app/test/fetch/api_fetch/test_data/configurations.py
app/test/fetch/cli_fetch/test_cli_access.py
app/test/fetch/db_fetch/mock_cursor.py
app/test/fetch/db_fetch/test_data/db_fetch_host_network_agents.py
app/test/fetch/db_fetch/test_data/db_fetch_oteps.py
app/test/fetch/db_fetch/test_data/db_fetch_vedges_ovs.py
app/test/fetch/db_fetch/test_db_fetch_oteps.py
app/test/fetch/db_fetch/test_db_fetch_vedges_ovs.py
app/test/scan/test_data/configurations.py
app/test/scan/test_data/scanner.py
app/test/utils/test_cli_dist_translator.py [new file with mode: 0644]
app/utils/cli_dist_translator.py [new file with mode: 0644]
app/utils/inventory_mgr.py
app/utils/logging/mongo_logging_handler.py
app/utils/mongo_access.py
app/utils/util.py
ui/imports/api/accounts/methods.js
ui/imports/api/connection-tests/connection-tests.js [new file with mode: 0644]
ui/imports/api/connection-tests/methods.js [new file with mode: 0644]
ui/imports/api/connection-tests/server/publications.js [new file with mode: 0644]
ui/imports/api/environments/configuration-groups/monitoring-configuration.js
ui/imports/api/environments/environments.js
ui/imports/api/environments/methods.js
ui/imports/api/scheduled-scans/scheduled-scans.js
ui/imports/api/scheduled-scans/server/publications.js
ui/imports/api/supported_environments/supported_environments.js
ui/imports/api/user-settings/methods.js [moved from ui/imports/api/configurations/methods.js with 77% similarity]
ui/imports/api/user-settings/server/publications.js [moved from ui/imports/api/configurations/server/publications.js with 80% similarity]
ui/imports/api/user-settings/user-settings.js [moved from ui/imports/api/configurations/configurations.js with 85% similarity]
ui/imports/lib/images-for-node-type.js
ui/imports/startup/client/index.js
ui/imports/startup/client/template-helpers.js
ui/imports/startup/server/register-api.js
ui/imports/ui/components/alarm-icons/alarm-icons.html
ui/imports/ui/components/alarm-icons/alarm-icons.js
ui/imports/ui/components/dashboard/dashboard.js
ui/imports/ui/components/detailed-node-info-box/detailed-node-info-box.html
ui/imports/ui/components/detailed-node-info-box/detailed-node-info-box.styl
ui/imports/ui/components/env-aci-info/env-aci-info.html
ui/imports/ui/components/env-aci-info/env-aci-info.js
ui/imports/ui/components/env-amqp-credentials-info/env-amqp-credentials-info.html
ui/imports/ui/components/env-amqp-credentials-info/env-amqp-credentials-info.js
ui/imports/ui/components/env-main-info/env-main-info.html
ui/imports/ui/components/env-main-info/env-main-info.js
ui/imports/ui/components/env-master-host-credentials-info/env-master-host-credentials-info.html
ui/imports/ui/components/env-master-host-credentials-info/env-master-host-credentials-info.js
ui/imports/ui/components/env-monitoring-info/env-monitoring-info.html
ui/imports/ui/components/env-monitoring-info/env-monitoring-info.js
ui/imports/ui/components/env-nfv-info/env-nfv-info.html
ui/imports/ui/components/env-nfv-info/env-nfv-info.js
ui/imports/ui/components/env-open-stack-db-credentials-info/env-open-stack-db-credentials-info.html
ui/imports/ui/components/env-open-stack-db-credentials-info/env-open-stack-db-credentials-info.js
ui/imports/ui/components/env-os-api-endpoint-info/env-os-api-endpoint-info.html
ui/imports/ui/components/env-os-api-endpoint-info/env-os-api-endpoint-info.js
ui/imports/ui/components/environment-dashboard/environment-dashboard.js
ui/imports/ui/components/environment-wizard/environment-wizard.js
ui/imports/ui/components/environment/environment.html
ui/imports/ui/components/environment/environment.js
ui/imports/ui/components/general-folder-node-dashboard/general-folder-node-dashboard.js
ui/imports/ui/components/graph-tooltip-window/graph-tooltip-window.js
ui/imports/ui/components/index.styl
ui/imports/ui/components/message/message.html
ui/imports/ui/components/message/message.js
ui/imports/ui/components/network-graph-manager/network-graph-manager.js
ui/imports/ui/components/network-graph/network-graph.js
ui/imports/ui/components/new-scanning/new-scanning.js
ui/imports/ui/components/scanning-request/scanning-request.js
ui/imports/ui/components/scheduled-scan/scheduled-scan.js
ui/imports/ui/components/scheduled-scans-list/scheduled-scans-list.html
ui/imports/ui/components/user-settings/user-settings.html [moved from ui/imports/ui/components/configuration/configuration.html with 91% similarity]
ui/imports/ui/components/user-settings/user-settings.js [moved from ui/imports/ui/components/configuration/configuration.js with 82% similarity]
ui/imports/ui/components/user-settings/user-settings.styl [moved from ui/imports/ui/components/configuration/configuration.styl with 92% similarity]
ui/lib/router.js
ui/public/ic_device_hub_black_24dp_2x-green.png [new file with mode: 0644]
ui/public/ic_device_hub_black_24dp_2x-orange.png [new file with mode: 0644]
ui/public/ic_device_hub_black_24dp_2x-red.png [new file with mode: 0644]
ui/public/ic_device_hub_black_24dp_2x.png [new file with mode: 0644]
ui/public/ic_tv_black_24dp_2x.png [new file with mode: 0644]
ui/public/ic_zoom_out_map_black_24dp_2x.png [new file with mode: 0644]
ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-green.png [new file with mode: 0644]
ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-orange.png [new file with mode: 0644]
ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-red.png [new file with mode: 0644]
ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x.png [new file with mode: 0644]
ui/public/old-2017-09-25/ic_tv_black_24dp_2x-old-2017-09-25.png [new file with mode: 0644]
ui/public/old-2017-09-25/ic_zoom_out_map_black_24dp_2x-old-2017-09-25.png [new file with mode: 0644]

index 04fc4b9..b7139f4 100644 (file)
@@ -19,7 +19,7 @@ class Auth:
         super().__init__()
         self.inv = InventoryMgr()
         self.log = FullLogger()
-        self.tokens_coll = self.inv.client['tokens']['api_tokens']
+        self.tokens_coll = self.inv.collections['api_tokens']
         self.ldap_access = LDAPAccess()
 
     def get_token(self, token):
@@ -37,7 +37,7 @@ class Auth:
             self.tokens_coll.insert_one(token)
         except Exception as e:
             self.log.error("Failed to write new token {0} to database for {1}"
-                           .format(token[token], str(e)))
+                           .format(token['token'], str(e)))
             error = 'Failed to create new token'
 
         return error
index 32e70ad..90a1adf 100644 (file)
@@ -239,6 +239,7 @@ class EnvironmentConfigs(ResponderBase):
             "configuration": self.require(list, mandatory=True),
             "distribution": self.require(str, False, DataValidate.LIST,
                                          self.distributions, True),
+            "distribution_version": self.require(str, mandatory=True),
             "listen": self.require(bool, True, mandatory=True),
             "user": self.require(str),
             "mechanism_drivers": self.require(list, False, DataValidate.LIST,
@@ -343,8 +344,11 @@ class EnvironmentConfigs(ResponderBase):
         # validate the environment config with supported environments
         matches = {
             'environment.distribution': env_config['distribution'],
+            'environment.distribution_version':
+                env_config['distribution_version'],
             'environment.type_drivers': env_config['type_drivers'],
-            'environment.mechanism_drivers': {'$in': env_config['mechanism_drivers']}
+            'environment.mechanism_drivers':
+                {'$in': env_config['mechanism_drivers']}
         }
 
         err_prefix = 'configuration not accepted: '
index c067754..e09ebb6 100644 (file)
     "compute.instance.rebuild.end": "EventInstanceAdd",
     "compute.instance.update": "EventInstanceUpdate",
     "compute.instance.delete.end": "EventInstanceDelete",
-    "network.create.start": "EventNetworkAdd",
     "network.create.end": "EventNetworkAdd",
     "network.update": "EventNetworkUpdate",
-    "network.update.start": "EventNetworkUpdate",
     "network.update.end": "EventNetworkUpdate",
     "network.delete": "EventNetworkDelete",
-    "network.delete.start": "EventNetworkDelete",
     "network.delete.end": "EventNetworkDelete",
     "subnet.create": "EventSubnetAdd",
-    "subnet.create.start": "EventSubnetAdd",
     "subnet.create.end": "EventSubnetAdd",
     "subnet.update": "EventSubnetUpdate",
-    "subnet.update.start": "EventSubnetUpdate",
     "subnet.update.end": "EventSubnetUpdate",
     "subnet.delete": "EventSubnetDelete",
-    "subnet.delete.start": "EventSubnetDelete",
     "subnet.delete.end": "EventSubnetDelete",
+    "port.create": "EventPortAdd",
     "port.create.end": "EventPortAdd",
+    "port.update": "EventPortUpdate",
     "port.update.end": "EventPortUpdate",
+    "port.delete": "EventPortDelete",
     "port.delete.end": "EventPortDelete",
     "router.create": "EventRouterAdd",
-    "router.create.start": "EventRouterAdd",
     "router.create.end": "EventRouterAdd",
     "router.update": "EventRouterUpdate",
-    "router.update.start": "EventRouterUpdate",
     "router.update.end": "EventRouterUpdate",
     "router.delete": "EventRouterDelete",
-    "router.delete.start": "EventRouterDelete",
     "router.delete.end": "EventRouterDelete",
     "router.interface.create": "EventInterfaceAdd",
-    "router.interface.delete": "EventInterfaceDelete"
+    "router.interface.create.end": "EventInterfaceAdd",
+    "router.interface.delete": "EventInterfaceDelete",
+    "router.interface.delete.end": "EventInterfaceDelete"
   }
 }
\ No newline at end of file
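
Note on the events.json change above: the ".start" notification variants are dropped (presumably because they arrive before the change is committed in Neutron, so handling them is unreliable), the ".end" and bare forms are kept, and ".end" variants are added for the router interface events along with bare port events. A minimal lookup sketch in Python, using an abbreviated copy of the mapping; the function name is illustrative, not part of the project:

    from typing import Optional

    # Abbreviated copy of the mapping above; .start notifications are intentionally absent.
    EVENT_HANDLERS = {
        "router.create": "EventRouterAdd",
        "router.create.end": "EventRouterAdd",
        "router.interface.create.end": "EventInterfaceAdd",
        "port.create": "EventPortAdd",
    }

    def handler_class_name(event_type: str) -> Optional[str]:
        # unknown (or .start) event types fall through to None and are skipped
        return EVENT_HANDLERS.get(event_type)
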
index 2368333..55c31f6 100644 (file)
@@ -4,9 +4,9 @@
   "link_finders": [
     "FindLinksForInstanceVnics",
     "FindLinksForOteps",
-    "FindLinksForPnics",
     "FindLinksForVconnectors",
     "FindLinksForVedges",
-    "FindLinksForVserviceVnics"
+    "FindLinksForVserviceVnics",
+    "FindLinksForPnics"
   ]
 }
\ No newline at end of file
index ae856a2..c5efb06 100644 (file)
         },
         "fetcher": "CliFetchHostPnics",
         "children_scanner": "ScanHostPnic"
+      },
+      {
+        "type": "host_pnic",
+        "fetcher": "CliFetchBondHostPnics",
+        "children_scanner": "ScanHostPnic"
       }
     ],
     "ScanHostPnic": [
index 6a56912..e2f8282 100644 (file)
@@ -40,12 +40,18 @@ class EventManager(Manager):
     }
 
     LISTENERS = {
-        'Mirantis-6.0': DefaultListener,
-        'Mirantis-7.0': DefaultListener,
-        'Mirantis-8.0': DefaultListener,
-        'RDO-Mitaka': DefaultListener,
-        'RDO-Liberty': DefaultListener,
-        'Apex-Euphrates': DefaultListener,
+        'Mirantis': {
+            '6.0': DefaultListener,
+            '7.0': DefaultListener,
+            '8.0': DefaultListener,
+        },
+        'RDO': {
+            'Mitaka': DefaultListener,
+            'Liberty': DefaultListener,
+        },
+        'Apex': {
+            'Euphrates': DefaultListener,
+        },
     }
 
     def __init__(self):
@@ -105,7 +111,8 @@ class EventManager(Manager):
 
     def get_listener(self, env: str):
         env_config = self.inv.get_env_config(env)
-        return self.LISTENERS.get(env_config.get('distribution'))
+        return (self.LISTENERS.get(env_config.get('distribution'), {})
+                              .get(env_config.get('distribution_version')))
 
     def listen_to_events(self, listener: ListenerBase, env_name: str, process_vars: dict):
         listener.listen({
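
The LISTENERS registry in EventManager is now keyed first by distribution and then by distribution_version, and get_listener() resolves it with two chained .get() calls. A minimal sketch of that lookup, assuming env_config carries both fields as the environment_configs API validation above now requires:

    # Two-level listener lookup, mirroring get_listener() above.
    def resolve_listener(listeners: dict, env_config: dict):
        dist = env_config.get('distribution')              # e.g. 'Apex'
        version = env_config.get('distribution_version')   # e.g. 'Euphrates'
        return listeners.get(dist, {}).get(version)        # None if either level is missing

    # resolve_listener(EventManager.LISTENERS,
    #                  {'distribution': 'Apex', 'distribution_version': 'Euphrates'})
    # returns DefaultListener
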
index 698559c..e54bedb 100644 (file)
@@ -83,11 +83,12 @@ class EventInterfaceAdd(EventBase):
 
     def handle(self, env, values):
         interface = values['payload']['router_interface']
+        project_id = values['_context_project_id']
         project = values['_context_project_name']
         host_id = values["publisher_id"].replace("network.", "", 1)
         port_id = interface['port_id']
         subnet_id = interface['subnet_id']
-        router_id = encode_router_id(host_id, interface['id'])
+        router_id = encode_router_id(interface['id'])
 
         network_document = self.inv.get_by_field(env, "network", "subnet_ids",
                                                  subnet_id, get_single=True)
@@ -98,10 +99,10 @@ class EventInterfaceAdd(EventBase):
         network_id = network_document['id']
 
         # add router-interface port document.
-        if len(ApiAccess.regions) == 0:
+        if not ApiAccess.regions:
             fetcher = ApiFetchRegions()
             fetcher.set_env(env)
-            fetcher.get(None)
+            fetcher.get(project_id)
         port_doc = EventSubnetAdd().add_port_document(env, port_id,
                                                       network_name=network_name)
 
index b1df978..f4ec400 100644 (file)
@@ -18,8 +18,7 @@ class EventInterfaceDelete(EventDeleteBase):
     def handle(self, env, values):
         interface = values['payload']['router_interface']
         port_id = interface['port_id']
-        host_id = values["publisher_id"].replace("network.", "", 1)
-        router_id = encode_router_id(host_id, interface['id'])
+        router_id = encode_router_id(interface['id'])
 
         # update router document
         port_doc = self.inv.get_by_id(env, port_id)
index 3c1c9e2..1fb2244 100644 (file)
@@ -96,7 +96,7 @@ class EventRouterAdd(EventBase):
         router = values['payload']['router']
         host_id = values["publisher_id"].replace("network.", "", 1)
         project_id = values['_context_project_id']
-        router_id = encode_router_id(host_id, router['id'])
+        router_id = encode_router_id(router['id'])
         host = self.inv.get_by_id(env, host_id)
 
         fetcher = CliFetchHostVservice()
index 65072d6..d0bd645 100644 (file)
@@ -21,7 +21,6 @@ class EventRouterDelete(EventDeleteBase):
             self.log.error("Publisher_id is not in event values. Aborting router delete")
             return EventResult(result=False, retry=False)
 
-        host_id = values['publisher_id'].replace('network.', '', 1)
         if 'router_id' in payload:
             router_id = payload['router_id']
         elif 'id' in payload:
@@ -33,5 +32,5 @@ class EventRouterDelete(EventDeleteBase):
             self.log.error("Router id is not in payload. Aborting router delete")
             return EventResult(result=False, retry=False)
 
-        router_full_id = encode_router_id(host_id, router_id)
+        router_full_id = encode_router_id(router_id)
         return self.delete_handler(env, router_full_id, "vservice")
index cfbbf58..b63b224 100644 (file)
@@ -26,7 +26,7 @@ class EventRouterUpdate(EventBase):
         host_id = values["publisher_id"].replace("network.", "", 1)
         router_id = payload['id'] if 'id' in payload else router['id']
 
-        router_full_id = encode_router_id(host_id, router_id)
+        router_full_id = encode_router_id(router_id)
         router_doc = self.inv.get_by_id(env, router_full_id)
         if not router_doc:
             self.log.info("Router document not found, aborting router updating")
index fcae5fd..4126e0c 100644 (file)
@@ -131,10 +131,10 @@ class EventSubnetAdd(EventBase):
         # Check DHCP enable, if true, scan network.
         if subnet['enable_dhcp'] is True:
             # update network
-            if len(ApiAccess.regions) == 0:
+            if not ApiAccess.regions:
                 fetcher = ApiFetchRegions()
                 fetcher.set_env(env)
-                fetcher.get(None)
+                fetcher.get(project_id)
 
             self.log.info("add new subnet.")
             host_id = notification["publisher_id"].replace("network.", "", 1)
index 3529f0d..59b0afb 100644 (file)
@@ -23,6 +23,7 @@ class EventSubnetUpdate(EventBase):
     def handle(self, env, notification):
         # check for network document.
         subnet = notification['payload']['subnet']
+        project_id = notification['_context_project_id']
         project = notification['_context_project_name']
         host_id = notification['publisher_id'].replace('network.', '', 1)
         subnet_id = subnet['id']
@@ -47,10 +48,10 @@ class EventSubnetUpdate(EventBase):
                                                network_document['name'])
 
                 # make sure that self.regions is not empty.
-                if len(ApiAccess.regions) == 0:
+                if not ApiAccess.regions:
                     fetcher = ApiFetchRegions()
                     fetcher.set_env(env)
-                    fetcher.get(None)
+                    fetcher.get(project_id)
 
                 self.log.info("add port binding to DHCP server.")
                 port_id = DbFetchPort(). \
index 54453a7..273f3e3 100755 (executable)
@@ -30,17 +30,19 @@ from monitoring.setup.monitoring_setup_manager import MonitoringSetupManager
 from utils.constants import OperationalStatus, EnvironmentFeatures
 from utils.inventory_mgr import InventoryMgr
 from utils.logging.full_logger import FullLogger
+from utils.logging.logger import Logger
 from utils.mongo_access import MongoAccess
-from utils.string_utils import stringify_datetime
 from utils.util import SignalHandler, setup_args
 
 
 class DefaultListener(ListenerBase, ConsumerMixin):
 
     SOURCE_SYSTEM = "OpenStack"
-
     COMMON_METADATA_FILE = "events.json"
 
+    LOG_FILENAME = "default_listener.log"
+    LOG_LEVEL = Logger.INFO
+
     DEFAULTS = {
         "env": "Mirantis-Liberty",
         "mongo_config": "",
@@ -92,7 +94,7 @@ class DefaultListener(ListenerBase, ConsumerMixin):
             return False, None
 
     def process_task(self, body, message):
-        received_timestamp = stringify_datetime(datetime.datetime.now())
+        received_timestamp = datetime.datetime.now()
         processable, event_data = self._extract_event_data(body)
         # If env listener can't process the message
         # or it's not intended for env listener to handle,
@@ -100,7 +102,7 @@ class DefaultListener(ListenerBase, ConsumerMixin):
         if processable and event_data["event_type"] in self.handler.handlers:
             event_result = self.handle_event(event_data["event_type"],
                                              event_data)
-            finished_timestamp = stringify_datetime(datetime.datetime.now())
+            finished_timestamp = datetime.datetime.now()
             self.save_message(message_body=event_data,
                               result=event_result,
                               started=received_timestamp,
@@ -143,8 +145,8 @@ class DefaultListener(ListenerBase, ConsumerMixin):
     # 'Retry' flag specifies if the error is recoverable or not
     # 'Retry' flag is checked only if 'result' is False
     def handle_event(self, event_type: str, notification: dict) -> EventResult:
-        print("Got notification.\nEvent_type: {}\nNotification:\n{}".
-              format(event_type, notification))
+        self.log.error("Got notification.\nEvent_type: {}\nNotification:\n{}".
+                       format(event_type, notification))
         try:
             result = self.handler.handle(event_name=event_type,
                                          notification=notification)
@@ -154,7 +156,7 @@ class DefaultListener(ListenerBase, ConsumerMixin):
             return EventResult(result=False, retry=False)
 
     def save_message(self, message_body: dict, result: EventResult,
-                     started: str, finished: str):
+                     started: datetime, finished: datetime):
         try:
             message = Message(
                 msg_id=message_body.get('message_id'),
index 7052dc9..4ff4e57 100644 (file)
@@ -7,11 +7,25 @@
 # which accompanies this distribution, and is available at                    #
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
 ###############################################################################
+import os
 from abc import ABC, abstractmethod
 
+from utils.logging.console_logger import ConsoleLogger
+from utils.logging.file_logger import FileLogger
+from utils.logging.logger import Logger
+
 
 class ListenerBase(ABC):
 
+    LOG_FILENAME = "listener_base.log"
+    LOG_LEVEL = Logger.WARNING
+
+    def __init__(self):
+        super().__init__()
+        self.log_file = os.path.join(FileLogger.LOG_DIRECTORY,
+                                     self.LOG_FILENAME)
+        self.log = ConsoleLogger(level=Logger.INFO)
+
     @staticmethod
     @abstractmethod
     def listen(self):
diff --git a/app/discover/fetcher_new.py b/app/discover/fetcher_new.py
deleted file mode 100644 (file)
index f545554..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-###############################################################################
-# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
-# and others                                                                  #
-#                                                                             #
-# All rights reserved. This program and the accompanying materials            #
-# are made available under the terms of the Apache License, Version 2.0       #
-# which accompanies this distribution, and is available at                    #
-# http://www.apache.org/licenses/LICENSE-2.0                                  #
-###############################################################################
-from discover.fetcher import Fetcher\r
-##old stuff\r
-class FetchHostObjectTypes(Fetcher):\r
-  \r
-  \r
-  def get(self, parent):\r
-    ret = {\r
-      "type": "host_object_type",\r
-      "id": "",\r
-      "parent": parent,\r
-      "rows": [\r
-        {"id": "instances_root", "text": "Instances", "descendants": 1},\r
-        {"id": "networks_root", "text": "Networks", "descendants": 1},\r
-        {"id": "pnics_root", "text": "pNICs", "descendants": 1},\r
-        {"id": "vservices_root", "text": "vServices", "descendants": 1}\r
-      ]\r
-    }\r
-    return ret\r
-\r
-    ## old/moved\r
-\r
index 275a3e8..c77b22a 100644 (file)
@@ -12,6 +12,7 @@ import time
 
 from discover.fetcher import Fetcher
 from utils.binary_converter import BinaryConverter
+from utils.cli_dist_translator import CliDistTranslator
 from utils.logging.console_logger import ConsoleLogger
 from utils.ssh_conn import SshConn
 
@@ -41,11 +42,16 @@ class CliAccess(BinaryConverter, Fetcher):
     def run(self, cmd, ssh_to_host="", enable_cache=True, on_gateway=False,
             ssh=None, use_sudo=True):
         ssh_conn = ssh if ssh else SshConn(ssh_to_host)
-        if use_sudo and not cmd.strip().startswith("sudo "):
-            cmd = "sudo " + cmd
-        if not on_gateway and ssh_to_host \
-                and not ssh_conn.is_gateway_host(ssh_to_host):
-            cmd = self.ssh_cmd + ssh_to_host + " " + cmd
+        commands = self.adapt_cmd_to_env(ssh_conn, cmd, use_sudo, on_gateway,
+                                         ssh_to_host)
+        out = ''
+        for c in commands:
+            out += self.run_single_command(c, ssh_conn, ssh_to_host,
+                                           enable_cache=enable_cache)
+        return out
+
+    def run_single_command(self, cmd, ssh_conn, ssh_to_host="",
+                           enable_cache=True):
         curr_time = time.time()
         cmd_path = ssh_to_host + ',' + cmd
         if enable_cache and cmd_path in self.cached_commands:
@@ -73,9 +79,44 @@ class CliAccess(BinaryConverter, Fetcher):
         ret = out.splitlines()
         # if split by whitespace did not work, try splitting by "\\n"
         if len(ret) == 1:
-            ret = [l for l in out.split("\\n") if l != ""]
+            ret = [line for line in out.split("\\n") if line != ""]
         return ret
 
+    MULTI_COMMAND_SEPARATOR = ';;;'
+
+    @staticmethod
+    def handle_split_cmd(cmd: str):
+        if CliAccess.MULTI_COMMAND_SEPARATOR in cmd:
+            return cmd.split(CliAccess.MULTI_COMMAND_SEPARATOR)
+        return [cmd]
+
+    def adapt_cmd_to_env(self, ssh_conn, cmd, use_sudo, on_gateway,
+                         ssh_to_host):
+        cmd = self.adapt_cmd_to_dist(cmd)
+        commands = self.handle_split_cmd(cmd)
+        return [self.adapt_cmd_to_environment(c, use_sudo, on_gateway,
+                                              ssh_to_host, ssh_conn)
+                for c in commands]
+
+    def adapt_cmd_to_environment(self, cmd, use_sudo, on_gateway, ssh_to_host,
+                                 ssh_conn):
+        if self.configuration.environment["distribution"] == "Mercury":
+            use_sudo = False
+        if use_sudo and not cmd.strip().startswith("sudo "):
+            cmd = "sudo " + cmd
+        if not on_gateway and ssh_to_host \
+                and not ssh_conn.is_gateway_host(ssh_to_host):
+            cmd = self.ssh_cmd + ssh_to_host + " " + cmd
+        return cmd
+
+    def adapt_cmd_to_dist(self, cmd):
+        env_conf = self.configuration.get_env_config()
+        dist = env_conf.get('distribution')
+        dist_version = env_conf.get('distribution_version')
+        translator = CliDistTranslator(dist, dist_version=dist_version)
+        cmd = translator.translate(cmd)
+        return cmd
+
     # parse command output columns separated by whitespace
     # since headers can contain whitespace themselves,
     # it is the caller's responsibility to provide the headers
@@ -126,7 +167,8 @@ class CliAccess(BinaryConverter, Fetcher):
             content[headers[i]] = content_parts[i]
         return content
 
-    def merge_ws_spillover_lines(self, lines):
+    @staticmethod
+    def merge_ws_spillover_lines(lines):
         # with WS-separated output, extra output sometimes spills to next line
         # detect that and add to the end of the previous line for our processing
         pending_line = None
@@ -156,7 +198,8 @@ class CliAccess(BinaryConverter, Fetcher):
     - header_regexp: regexp marking the start of the section
     - end_regexp: regexp marking the end of the section
     """
-    def get_section_lines(self, lines, header_regexp, end_regexp):
+    @staticmethod
+    def get_section_lines(lines, header_regexp, end_regexp):
         if not lines:
             return []
         header_re = re.compile(header_regexp)
@@ -196,7 +239,8 @@ class CliAccess(BinaryConverter, Fetcher):
             if 'name' not in o and 'default' in regexp_tuple:
                 o[name] = regexp_tuple['default']
 
-    def find_matching_regexps(self, o, line, regexps):
+    @staticmethod
+    def find_matching_regexps(o, line, regexps):
         for regexp_tuple in regexps:
             name = regexp_tuple['name']
             regex = regexp_tuple['re']
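
CliAccess.run() above now adapts every command in two steps before execution: it is first translated per distribution/version by the new CliDistTranslator (see app/utils/cli_dist_translator.py in the file list), then split on the ';;;' separator so one logical command can expand into several shell commands whose outputs are concatenated. A standalone sketch of the splitting step, reusing the class constant:

    # Behaviour of CliAccess.handle_split_cmd(), shown standalone.
    MULTI_COMMAND_SEPARATOR = ';;;'

    def split_multi_command(cmd: str) -> list:
        if MULTI_COMMAND_SEPARATOR in cmd:
            return cmd.split(MULTI_COMMAND_SEPARATOR)
        return [cmd]

    # split_multi_command('ip link show bond0;;;cat /proc/net/bonding/bond0')
    # -> ['ip link show bond0', 'cat /proc/net/bonding/bond0']
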
diff --git a/app/discover/fetchers/cli/cli_fetch_bond_host_pnics.py b/app/discover/fetchers/cli/cli_fetch_bond_host_pnics.py
new file mode 100644 (file)
index 0000000..77f149f
--- /dev/null
@@ -0,0 +1,134 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
+# and others                                                                  #
+#                                                                             #
+# All rights reserved. This program and the accompanying materials            #
+# are made available under the terms of the Apache License, Version 2.0       #
+# which accompanies this distribution, and is available at                    #
+# http://www.apache.org/licenses/LICENSE-2.0                                  #
+###############################################################################
+from collections import deque
+
+from discover.fetchers.cli.cli_access import CliAccess
+from utils.inventory_mgr import InventoryMgr
+
+
+class CliFetchBondHostPnics(CliAccess):
+    BOND_DIR = '/proc/net/bonding/'
+    SLAVE_INTERFACE_HEADER = 'Slave Interface: '
+
+    def __init__(self):
+        super().__init__()
+        self.inv = InventoryMgr()
+
+    def get(self, parent_id: str):
+        self.log.info('CliFetchBondHostPnics: checking under {}'
+                      .format(parent_id))
+        host_id = parent_id[:parent_id.rindex('-')]
+        cmd = 'ls -1 {} 2>&1'.format(self.BOND_DIR)
+        host = self.inv.get_by_id(self.get_env(), host_id)
+        if not host:
+            self.log.error('CliFetchBondHostPnics: host not found: ' + host_id)
+            return []
+        host_types = host['host_type']
+        if 'Network' not in host_types and 'Compute' not in host_types:
+            return []
+        lines = self.run_fetch_lines(cmd, host_id)
+        if lines and 'No such file or directory' in lines[0]:
+            return []  # no bonds so directory does not exist
+        bonds = []
+        for line in lines:
+            bond = self.get_bond_details(host_id, line)
+            if bond:
+                bonds.append(bond)
+        return bonds
+
+    def get_bond_details(self, host_id: str, interface_name: str) -> dict:
+        lines = self.run_fetch_lines('cat {}{}'
+                                     .format(self.BOND_DIR, interface_name),
+                                     host_id)
+        status, mac_address = \
+            self.get_bond_status_and_mac_address(host_id, interface_name)
+        interface_id = '{}-{}'.format(interface_name, mac_address)
+        interface = {
+            'host': host_id,
+            'name': interface_name,
+            'id': interface_id,
+            'local_name': interface_name,
+            'mac_address': mac_address,
+            'Link detected': 'yes' if status == 'up' else 'no',
+            'EtherChannel': True,
+            'EtherChannel Master': '',
+            'members': {}
+        }
+        # keep stack of info objects to support multi-level info
+        info_objects = deque([interface])
+        for line in [line for line in lines if line != '']:
+            if line.startswith(self.SLAVE_INTERFACE_HEADER):
+                name = line[line.index(':')+1:].strip()
+                slave = {
+                    'name': name,
+                    'EtherChannel Master': interface_id
+                }
+                # remove any pending info objects, keep only interface
+                info_objects = deque([interface])
+                info_objects.append(slave)
+                interface['members'][name] = slave
+            elif line.rstrip(':').lower().endswith('info'):
+                # move to lower level info object
+                info_name = line.rstrip(':')
+                upper_info_obj = info_objects[-1]
+                info_obj = {}
+                upper_info_obj[info_name] = info_obj
+                info_objects.append(info_obj)
+            else:
+                self.get_attribute_from_line(info_objects[-1], line)
+        for slave in list(interface['members'].values()):
+            self.set_slave_host_pnic_bond_attributes(host_id, slave,
+                                                     interface_id)
+        return interface
+
+    def get_bond_status_and_mac_address(self, host_id: str, name: str):
+        output = self.run_fetch_lines('ip link show {}'.format(name), host_id)
+        status_line = output[0]
+        status = status_line[status_line.index(' state ') + len(' state '):]
+        status = status[:status.index(' ')]
+        matches = [line.strip() for line in output if 'link/ether' in line]
+        if not matches:
+            self.log.error('Failed to find line with MAC address '
+                           'for bond {} (host: {})'
+                           .format(name, host_id))
+        tokens = matches[0].split()
+        if len(tokens) < 2:
+            self.log.error('Failed to find MAC address in line: {}'
+                           .format(matches[0]))
+        mac_address = tokens[1]
+        return status.lower(), mac_address
+
+    def get_attribute_from_line(self, obj: dict, line: str):
+        if ':' not in line:
+            self.log.error('object {}: failed to find ":" in line: {}'
+                           .format(obj['name'], line))
+            return
+        attr = line[:line.index(':')]
+        value = line[len(attr)+1:]
+        obj[attr.strip()] = value.strip()
+
+    def set_slave_host_pnic_bond_attributes(self, host, slave, interface_id):
+        pnic = self.inv.find_one({
+            'environment': self.get_env(),
+            'host': host,
+            'type': 'host_pnic',
+            'name': slave['name']
+        })
+        if not pnic:
+            self.log.error('unable to find slave pNIC {} under bond {}'
+                           .format(slave.get('name', ''), interface_id))
+            return
+        mac_address = pnic['mac_address']
+        slave_id = '{}-{}'.format(slave.get('name', ''), mac_address)
+        slave['mac_address'] = mac_address
+        slave['id'] = slave_id
+        pnic['EtherChannel'] = True
+        pnic['EtherChannel Master'] = interface_id
+        self.inv.set(pnic)
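
The new CliFetchBondHostPnics fetcher parses the bonding driver's /proc/net/bonding/<bond> files: each "Slave Interface: <name>" line opens a new slave section, "...info" headers open nested info objects, and the remaining "key: value" lines become attributes. A simplified, self-contained sketch of that parsing idea (not the class itself, and without the nested-info handling):

    # Simplified sketch of the attribute parsing used above.
    def parse_bond_lines(lines):
        bond, current, slaves = {}, None, {}
        for line in (l for l in lines if l.strip()):
            if line.startswith('Slave Interface: '):
                name = line.split(':', 1)[1].strip()
                current = {'name': name}
                slaves[name] = current
            elif ':' in line:
                key, value = line.split(':', 1)
                (current if current is not None else bond)[key.strip()] = value.strip()
        return bond, slaves

    # parse_bond_lines(['MII Status: up', 'Slave Interface: eth2', 'Speed: 10000 Mbps'])
    # -> ({'MII Status': 'up'}, {'eth2': {'name': 'eth2', 'Speed': '10000 Mbps'}})
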
index 5df4d3b..4af3ebc 100644 (file)
@@ -67,21 +67,17 @@ class CliFetchHostPnics(CliAccess):
             tokens = None
             if interface is None:
                 tokens = line.split()
-                name = tokens[0].strip('- :')
-                name = name.strip()
-                if name == interface_name:
-                    line_remainder = line.strip('-')[len(interface_name)+2:]
-                    line_remainder = line_remainder.strip(' :')
-                    id = interface_name
-                    interface = {
-                        "host": host_id,
-                        "name": id,
-                        "local_name": interface_name,
-                        "lines": []
-                    }
-                    self.handle_line(interface, line_remainder)
-                    if '<UP,' in line:
-                        status_up = True
+                line_remainder = line.strip('-')[len(interface_name)+2:]
+                line_remainder = line_remainder.strip(' :')
+                interface = {
+                    "host": host_id,
+                    "name": interface_name,
+                    "local_name": interface_name,
+                    "lines": []
+                }
+                self.handle_line(interface, line_remainder)
+                if '<UP,' in line:
+                    status_up = True
             if status_up is None:
                 if tokens is None:
                     tokens = line.split()
index 9f8173f..ae7c656 100644 (file)
@@ -31,35 +31,37 @@ class CliFetchHostVservice(CliAccess, DbAccess):
     def set_details(self, host_id, r):
         # keep the index without prefix
         id_full = r["local_service_id"].strip()
-        prefix = id_full[1:id_full.index('-')]
-        id_clean = id_full[id_full.index('-') + 1:]
-        r["service_type"] = prefix
-        name = self.get_router_name(r, id_clean) if prefix == "router" \
+        prefix = id_full[:id_full.index('-')]
+        id_clean = id_full[len(prefix)+1:]
+        r["service_type"] = prefix[1:]
+        name = self.get_router_name(r, id_clean) \
+            if r["service_type"] == "router" \
             else self.get_network_name(id_clean)
         r["name"] = prefix + "-" + name
         r["host"] = host_id
-        r["id"] = host_id + "-" + id_full
+        r["id"] = "{}-{}".format(host_id, id_full)
         self.set_agent_type(r)
 
-    def get_network_name(self, id):
+    def get_network_name(self, network_id):
         query = """
                 SELECT name
                 FROM {}.networks
                 WHERE id = %s
                 """.format(self.neutron_db)
-        results = self.get_objects_list_for_id(query, "router", id)
+        results = self.get_objects_list_for_id(query, "router", network_id)
         if not list(results):
-            return id
+            return network_id
         for db_row in results:
             return db_row["name"]
 
-    def get_router_name(self, r, id):
+    def get_router_name(self, r, router_id):
         query = """
                 SELECT *
                 FROM {}.routers
                 WHERE id = %s
                 """.format(self.neutron_db)
-        results = self.get_objects_list_for_id(query, "router", id.strip())
+        results = self.get_objects_list_for_id(query, "router",
+                                               router_id.strip())
         for db_row in results:
             r.update(db_row)
         return r["name"]
index 9b62dcb..b9496bc 100644 (file)
@@ -19,7 +19,7 @@ class CliFetchHostVservices(CliFetchHostVservice):
         if "Network" not in host["host_type"]:
             return []
         services_ids = [l[:l.index(' ')] if ' ' in l else l
-                        for l in self.run_fetch_lines("ip netns", host_id)]
+                        for l in self.run_fetch_lines("ip netns list", host_id)]
         results = [{"local_service_id": s} for s in services_ids if self.type_re.match(s)]
         for r in results:
             self.set_details(host_id, r)
index 4de1840..bb1e7fc 100644 (file)
@@ -58,7 +58,7 @@ class CliFetchInstanceVnicsBase(CliAccess):
 
     def set_vnic_properties(self, v, instance):
         v["name"] = self.get_vnic_name(v, instance)
-        v["id"] = v["name"]
+        v["id"] = "{}-{}".format(instance["host"], v["name"])
         v["vnic_type"] = "instance_vnic"
         v["host"] = instance["host"]
         v["instance_id"] = instance["id"]
index d10d99e..239ecd7 100644 (file)
@@ -42,7 +42,7 @@ class CliFetchVserviceVnics(CliAccess):
             return []
         if "Network" not in host["host_type"]:
             return []
-        lines = self.run_fetch_lines("ip netns", host_id)
+        lines = self.run_fetch_lines("ip netns list", host_id)
         ret = []
         for l in [l for l in lines
                   if l.startswith("qdhcp") or l.startswith("qrouter")]:
@@ -68,7 +68,7 @@ class CliFetchVserviceVnics(CliAccess):
                     current = None
                 else:
                     line_remainder = matches.group(2)
-                    vservice_id = host + "-" + service
+                    master_parent_id = "{}-{}".format(host, service)
                     current = {
                         "id": host + "-" + name,
                         "type": "vnic",
@@ -76,9 +76,9 @@ class CliFetchVserviceVnics(CliAccess):
                         "host": host,
                         "name": name,
                         "master_parent_type": "vservice",
-                        "master_parent_id": vservice_id,
+                        "master_parent_id": master_parent_id,
                         "parent_type": "vnics_folder",
-                        "parent_id": vservice_id + "-vnics",
+                        "parent_id": "{}-vnics".format(master_parent_id),
                         "parent_text": "vNICs",
                         "lines": []
                     }
index 49fdb5e..64d7372 100644 (file)
@@ -7,6 +7,7 @@
 # which accompanies this distribution, and is available at                    #
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
 ###############################################################################
+import functools
 import mysql.connector
 
 from discover.configuration import Configuration
@@ -15,6 +16,24 @@ from discover.scan_error import ScanError
 from utils.string_utils import jsonify
 
 
+def with_cursor(method):
+    @functools.wraps(method)
+    def wrap(self, *args, **kwargs):
+        self.connect_to_db(DbAccess.query_count_per_con >= 25)
+        DbAccess.query_count_per_con += 1
+        cursor = DbAccess.conn.cursor(dictionary=True)
+        try:
+            res = method(self, *args, cursor=cursor, **kwargs)
+            DbAccess.conn.commit()
+            return res
+        except:
+            DbAccess.conn.rollback()
+            raise
+        finally:
+            cursor.close()
+    return wrap
+
+
 class DbAccess(Fetcher):
     conn = None
     query_count_per_con = 0
@@ -47,10 +66,9 @@ class DbAccess(Fetcher):
             return
         DbAccess.query_count_per_con = 0
 
-    @staticmethod
-    def get_neutron_db_name():
+    @with_cursor
+    def get_neutron_db_name(self, cursor=None):
         # check if DB schema 'neutron' exists
-        cursor = DbAccess.conn.cursor(dictionary=True)
         cursor.execute('SHOW DATABASES')
         matches = [row.get('Database', '') for row in cursor
                    if 'neutron' in row.get('Database', '')]
@@ -68,6 +86,8 @@ class DbAccess(Fetcher):
             self.log.info("DbAccess: ****** forcing reconnect, " +
                           "query count: %s ******",
                           DbAccess.query_count_per_con)
+            DbAccess.conn.commit()
+            DbAccess.conn.close()
             DbAccess.conn = None
         self.conf = self.config.get("mysql")
         cnf = self.conf
@@ -76,16 +96,15 @@ class DbAccess(Fetcher):
                         cnf["user"], cnf["pwd"],
                         cnf["schema"])
 
-    def get_objects_list_for_id(self, query, object_type, id):
-        self.connect_to_db(DbAccess.query_count_per_con >= 25)
-        DbAccess.query_count_per_con += 1
+    @with_cursor
+    def get_objects_list_for_id(self, query, object_type, object_id,
+                                cursor=None):
         self.log.debug("query count: %s, running query:\n%s\n",
                        str(DbAccess.query_count_per_con), query)
 
-        cursor = DbAccess.conn.cursor(dictionary=True)
         try:
-            if id:
-                cursor.execute(query, [str(id)])
+            if object_id:
+                cursor.execute(query, [str(object_id)])
             else:
                 cursor.execute(query)
         except (AttributeError, mysql.connector.errors.OperationalError) as e:
@@ -93,13 +112,13 @@ class DbAccess(Fetcher):
             self.connect_to_db(True)
             # try again to run the query
             cursor = DbAccess.conn.cursor(dictionary=True)
-            if id:
-                cursor.execute(query, [str(id)])
+            if object_id:
+                cursor.execute(query, [str(object_id)])
             else:
                 cursor.execute(query)
 
         rows = []
-        for row in cursor:
+        for row in cursor.fetchall():
             rows.append(row)
         return rows
 
index c323573..7d415f2 100644 (file)
@@ -27,9 +27,8 @@ class DbFetchHostNetworkAgents(DbAccess):
         host_id = id[:-1 * len("-network_agents")]
         results = self.get_objects_list_for_id(query, "network_agent", host_id)
         mechanism_drivers = self.env_config['mechanism_drivers']
-        id_prefix = mechanism_drivers[0] if mechanism_drivers else 'network_agent'
         for o in results:
             o["configurations"] = json.loads(o["configurations"])
             o["name"] = o["binary"]
-            o['id'] = id_prefix + '-' + o['id']
+            o['id'] = o['name'] + '-' + o['id']
         return results
index 3e3f4e1..f7eb8bd 100644 (file)
@@ -35,7 +35,9 @@ class DbFetchOteps(DbAccess, CliAccess, metaclass=Singleton):
         table_name = "{}.ml2_{}_endpoints".format(self.neutron_db, tunnel_type)
         env_config = self.config.get_env_config()
         distribution = env_config["distribution"]
-        if distribution == "Canonical-icehouse":
+        distribution_version = env_config["distribution_version"]
+        dist_ver = "{}-{}".format(distribution, distribution_version)
+        if dist_ver == "Canonical-icehouse":
             # for Icehouse, we only get IP address from the DB, so take the
             # host IP address from the host data in Mongo
             host = self.inv.get_by_id(self.get_env(), host_id)
index 838ccb9..f516d10 100644 (file)
@@ -24,8 +24,8 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
         self.port_re = re.compile("^\s*port (\d+): ([^(]+)( \(internal\))?$")
         self.port_line_header_prefix = " " * 8 + "Port "
 
-    def get(self, id):
-        host_id = id[:id.rindex('-')]
+    def get(self, parent_id):
+        host_id = parent_id[:parent_id.rindex('-')]
         results = self.get_objects_list_for_id(
             """
               SELECT *
@@ -66,11 +66,11 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
             if not port_matches:
                 continue
             port = {}
-            id = port_matches.group(1)
+            port_id = port_matches.group(1)
             name = port_matches.group(2)
             is_internal = port_matches.group(3) == " (internal)"
             port["internal"] = is_internal
-            port["id"] = id
+            port["id"] = port_id
             port["name"] = name
             ports[name] = port
         return ports
@@ -106,7 +106,7 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
         if "tunneling_ip" not in doc["configurations"]:
             return {}
         if not doc["configurations"]["tunneling_ip"]:
-            self.get_bridge_pnic(doc)
+            self.get_pnics(doc)
             return {}
 
         # read the 'br-tun' interface ports
@@ -148,31 +148,48 @@ class DbFetchVedgesOvs(DbAccess, CliAccess, metaclass=Singleton):
             tunnel_ports[port["name"]] = port
         return tunnel_ports
 
-    def get_bridge_pnic(self, doc):
-        conf = doc["configurations"]
-        if "bridge_mappings" not in conf or not conf["bridge_mappings"]:
-            return
-        for v in conf["bridge_mappings"].values(): br = v
-        ifaces_list_lines = self.run_fetch_lines("ovs-vsctl list-ifaces " + br,
-                                                 doc["host"])
-        br_pnic_postfix = br + "--br-"
-        interface = ""
+    def get_pnics(self, vedge) -> dict:
+        bridges = vedge["configurations"].get("bridge_mappings", {})
+        pnics = {}
+        for bridge in bridges.values():
+            self.get_bridge_pnic(pnics, vedge, bridge)
+        return pnics
+
+    MIRANTIS_DIST = "Mirantis"
+
+    def get_bridge_pnic(self, pnics: dict, vedge: dict, bridge: dict):
+        cmd = "ovs-vsctl list-ifaces {}".format(bridge)
+        ifaces_list_lines = self.run_fetch_lines(cmd, vedge["host"])
+        env_config = self.configuration.get_env_config()
+        distribution = env_config.get("distribution")
+        dist_version = env_config.get("distribution_version")
+        use_br_postfix = distribution == self.MIRANTIS_DIST and \
+            dist_version in ["6.0", "7.0", "8.0"]
         for l in ifaces_list_lines:
-            if l.startswith(br_pnic_postfix):
-                interface = l[len(br_pnic_postfix):]
-                break
-        if not interface:
-            return
-        doc["pnic"] = interface
+            if use_br_postfix:
+                br_pnic_postfix = "{}--br-".format(bridge)
+                interface = l[len(br_pnic_postfix):] \
+                    if l.startswith(br_pnic_postfix) \
+                    else ""
+            else:
+                interface = l
+            if interface:
+                pnic = self.find_pnic_for_interface(vedge, interface)
+                if pnic:
+                    pnics[pnic["name"]] = pnic
+
+    def find_pnic_for_interface(self, vedge, interface):
         # add port ID to pNIC
         pnic = self.inv.find_items({
             "environment": self.get_env(),
             "type": "host_pnic",
-            "host": doc["host"],
+            "host": vedge["host"],
             "name": interface
         }, get_single=True)
         if not pnic:
             return
-        port = doc["ports"][interface]
-        pnic["port_id"] = port["id"]
+        vedge["pnic"] = interface
+        port = vedge["ports"].get(interface, {})
+        pnic["port_id"] = port.get("id", "")
         self.inv.set(pnic)
+        return pnic
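
get_pnics() above now walks every bridge in the vEdge's bridge_mappings and treats each line of 'ovs-vsctl list-ifaces <bridge>' as a candidate pNIC name; only on Mirantis 6.0/7.0/8.0 is the "<bridge>--br-" prefix expected and stripped. Illustrative input shape (values are examples, not from a real deployment):

    vedge = {
        'host': 'compute-0',
        'ports': {'ens785f0': {'id': '3', 'name': 'ens785f0'}},
        'configurations': {'bridge_mappings': {'physnet1': 'br-ex'}},
    }
    # 'ovs-vsctl list-ifaces br-ex' might return ['ens785f0'];
    # on Mirantis 6.0-8.0 the same interface would appear as 'br-ex--br-ens785f0'
    # and the prefix would be stripped before the pNIC lookup.
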
index e69de29..1e85a2a 100644 (file)
@@ -0,0 +1,10 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
+# and others                                                                  #
+#                                                                             #
+# All rights reserved. This program and the accompanying materials            #
+# are made available under the terms of the Apache License, Version 2.0       #
+# which accompanies this distribution, and is available at                    #
+# http://www.apache.org/licenses/LICENSE-2.0                                  #
+###############################################################################
+
index 1f02426..94eba7b 100644 (file)
@@ -41,6 +41,18 @@ class FindLinksForPnics(FindLinks):
 
     def add_pnic_network_links(self, pnic):
         host = pnic["host"]
+        if self.configuration.get_env_config()['type_drivers'] == "vlan":
+            # take this pnic only if we can find matching vedge-pnic links
+            matches = self.inv.find({
+                "environment": self.get_env(),
+                "link_type": "vedge-host_pnic",
+                "host": host,
+                "target_id": pnic["id"]},
+                projection={"_id": 1},
+                collection="links",
+                get_single=True)
+            if not matches:
+                return
         # find ports for that host, and fetch just the network ID
         ports = self.inv.find_items({
             "environment": self.get_env(),
index edb351a..0703cd8 100644 (file)
@@ -31,7 +31,8 @@ class FindLinksForVconnectors(FindLinks):
         is_ovs = mechanism_drivers and mechanism_drivers[0] == 'OVS'
         if is_ovs:
             # interface ID for OVS
-            vnic = self.inv.get_by_id(self.get_env(), interface_name)
+            vnic_id = "{}-{}".format(vconnector["host"], interface_name)
+            vnic = self.inv.get_by_id(self.get_env(), vnic_id)
         else:
             # interface ID for VPP - match interface MAC address to vNIC MAC
             interface = vconnector['interfaces'][interface_name]
index ca9bc4a..f975c92 100644 (file)
@@ -33,11 +33,6 @@ class FindLinksForVserviceVnics(FindLinks):
         host = self.inv.get_by_id(self.get_env(), v["host"])
         if "Network" not in host["host_type"]:
             return
-        if "network" not in v:
-            return
-        network = self.inv.get_by_id(self.get_env(), v["network"])
-        if network == []:
-            return
         vservice_id = v["parent_id"]
         vservice_id = vservice_id[:vservice_id.rindex('-')]
         vservice = self.inv.get_by_id(self.get_env(), vservice_id)
@@ -46,7 +41,14 @@ class FindLinksForVserviceVnics(FindLinks):
         target = v["_id"]
         target_id = v["id"]
         link_type = "vservice-vnic"
-        link_name = network["name"]
+        extra_attributes = None
+        if "network" in v:
+            network = self.inv.get_by_id(self.get_env(), v["network"])
+            link_name = network["name"]
+            extra_attributes = {'network': v['network']}
+        else:
+            link_name = "{}-{}".format(vservice["object_name"],
+                                       v["object_name"])
         state = "up"  # TBD
         link_weight = 0  # TBD
         self.create_link(self.get_env(),
@@ -54,4 +56,4 @@ class FindLinksForVserviceVnics(FindLinks):
                          target, target_id,
                          link_type, link_name, state, link_weight,
                          host=v["host"],
-                         extra_attributes={'network': v['network']})
+                         extra_attributes=extra_attributes)
index d1323bd..1fbcc68 100644 (file)
@@ -240,7 +240,7 @@ class Scanner(Fetcher):
         run_app_path = conf.get('run_app_path', '')
         if not run_app_path:
             run_app_path = conf.get('app_path', '/etc/calipso')
-            return run_app_path
+        return run_app_path
 
     def load_scanners_metadata(self):
         parser = ScanMetadataParser(self.inv)
index 523a838..c2b8579 100644 (file)
@@ -176,6 +176,10 @@ def start_mongo(dbport, copy):
     copy_file("scheduled_scans")
     copy_file("statistics")
     copy_file("supported_environments")
+    copy_file("connection_tests")
+    copy_file("api_tokens")
+    copy_file("user_settings")
+    copy_file("apex_environment_config")
 
     # note : 'messages', 'roles', 'users' and some of the 'constants'
     # are filled by calipso-ui at runtime
@@ -216,14 +220,14 @@ def start_ldap():
                                 volumes=calipso_volume)
 
 
-def start_api():
+def start_api(apiport):
     name = "calipso-api"
     if container_started(name):
         return
     print("\nstarting container {}...\n".format(name))
     image_name = "korenlev/calipso:api"
     download_image(image_name)
-    api_ports = {'8000/tcp': 8000, '22/tcp': 40022}
+    api_ports = {'8000/tcp': apiport, '22/tcp': 40022}
     DockerClient.containers.run(image_name,
                                 detach=True,
                                 name=name,
@@ -252,15 +256,15 @@ def start_scan():
                                 volumes=calipso_volume)
 
 
-def start_sensu():
+def start_sensu(uchiwaport, sensuport, rabbitport, rabbitmport):
     name = "calipso-sensu"
     if container_started(name):
         return
     print("\nstarting container {}...\n".format(name))
     image_name = "korenlev/calipso:sensu"
     download_image(image_name)
-    sensu_ports = {'22/tcp': 20022, '3000/tcp': 3000, '4567/tcp': 4567,
-                   '5671/tcp': 5671, '15672/tcp': 15672}
+    sensu_ports = {'22/tcp': 20022, '3000/tcp': uchiwaport, '4567/tcp': sensuport,
+                   '5671/tcp': rabbitport, '15672/tcp': rabbitmport}
     DockerClient.containers.run(image_name,
                                 detach=True,
                                 name=name,
@@ -326,6 +330,36 @@ parser.add_argument("--dbport",
                     type=int,
                     default="27017",
                     required=False)
+parser.add_argument("--apiport",
+                    help="Port for the Calipso API "
+                         "(default=8000)",
+                    type=int,
+                    default="8000",
+                    required=False)
+parser.add_argument("--uchiwaport",
+                    help="Port for the Calipso Uchiwa "
+                         "(default=3000)",
+                    type=int,
+                    default="3000",
+                    required=False)
+parser.add_argument("--rabbitmport",
+                    help="Port for the Calipso Sensu RabbitMQ Managment "
+                         "(default=15672)",
+                    type=int,
+                    default="15672",
+                    required=False)
+parser.add_argument("--sensuport",
+                    help="Port for the Calipso Sensu-api "
+                         "(default=4567)",
+                    type=int,
+                    default="4567",
+                    required=False)
+parser.add_argument("--rabbitport",
+                    help="Port for the Calipso Sensu RabbitMQ "
+                         "(default=5671)",
+                    type=int,
+                    default="5671",
+                    required=False)
 parser.add_argument("--dbuser",
                     help="User for the Calipso MongoDB "
                          "(default=calipso)",
@@ -339,14 +373,14 @@ parser.add_argument("--dbpassword",
                     default="calipso_default",
                     required=False)
 parser.add_argument("--command",
-                    help="'start-all' or 'stop-all' the calipso containers "
+                    help="'start-all' or 'stop-all' the Calipso containers "
                          "(default=None)",
                     type=str,
                     default=None,
                     required=False)
 parser.add_argument("--copy",
                     help="'c' to copy json files from 'db' folder to mongoDB, 'q' to skip copy of files "
-                         "(default=q)",
+                         "(default=None)",
                     type=str,
                     default=None,
                     required=False)
@@ -382,9 +416,10 @@ if action == "start":
     calipso_mongo_access_text = \
         "server {}\n" \
         "user {}\n" \
+        "port {}\n" \
         "pwd {}\n" \
         "auth_db calipso" \
-            .format(args.hostname, args.dbuser, args.dbpassword)
+            .format(args.hostname, args.dbuser, args.dbport, args.dbpassword)
     LDAP_PWD_ATTRIBUTE = "password password"
     LDAP_USER_PWD_ATTRIBUTE = "userpassword"
     ldap_text = \
@@ -421,13 +456,13 @@ if action == "start":
         start_ldap()
         time.sleep(1)
     if container == "calipso-api" or container == "all":
-        start_api()
+        start_api(args.apiport)
         time.sleep(1)
     if container == "calipso-scan" or container == "all":
         start_scan()
         time.sleep(1)
     if container == "calipso-sensu" or container == "all":
-        start_sensu()
+        start_sensu(args.uchiwaport, args.sensuport, args.rabbitport, args.rabbitmport)
         time.sleep(1)
     if container == "calipso-ui" or container == "all":
         start_ui(args.hostname, args.dbuser, args.dbpassword, args.webport,
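With the arguments added above, every externally exposed port becomes configurable at install time. As an illustration only (the non-default port values below are arbitrary examples, not values taken from this change), an invocation could look like:

    python3 app/install/calipso-installer.py --command start-all --copy c \
        --apiport 8001 --uchiwaport 3001 --sensuport 4568 \
        --rabbitport 5672 --rabbitmport 15673

Any flag left out falls back to the defaults declared in the parser (8000, 3000, 4567, 5671 and 15672 respectively).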
diff --git a/app/install/configure/setup_apex_environment.py b/app/install/configure/setup_apex_environment.py
new file mode 100644 (file)
index 0000000..7dc49c5
--- /dev/null
@@ -0,0 +1,568 @@
+#!/usr/bin/env python3
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
+# and others                                                                  #
+#                                                                             #
+# All rights reserved. This program and the accompanying materials            #
+# are made available under the terms of the Apache License, Version 2.0       #
+# which accompanies this distribution, and is available at                    #
+# http://www.apache.org/licenses/LICENSE-2.0                                  #
+###############################################################################
+from abc import ABC
+from logging.handlers import WatchedFileHandler
+import argparse
+import json
+import logging
+import re
+import shlex
+import subprocess
+import sys
+
+
+def run_command(cmd, raise_on_error=False) -> str:
+    try:
+        output = subprocess.check_output([cmd], shell=True)
+        return output.decode('utf-8')
+    except subprocess.CalledProcessError as e:
+        error_msg = 'Error running command: {}, output: {}'\
+            .format(cmd, e.output.decode('utf-8'))
+        if raise_on_error:
+            raise RuntimeError(error_msg)
+        return error_msg
+
+
+class Logger(ABC):
+    DEBUG = 'DEBUG'
+    INFO = 'INFO'
+    WARNING = 'WARNING'
+    ERROR = 'ERROR'
+    CRITICAL = 'CRITICAL'
+
+    PROJECT_NAME = 'Calipso'
+
+    levels = [DEBUG, INFO, WARNING, ERROR, CRITICAL]
+    log_format = '%(asctime)s %(levelname)s: %(message)s'
+    formatter = logging.Formatter(log_format)
+    default_level = INFO
+
+    def __init__(self, logger_name: str = PROJECT_NAME,
+                 level: str = default_level):
+        super().__init__()
+        self.check_level(level)
+        self.log = logging.getLogger(logger_name)
+        logging.basicConfig(format=self.log_format,
+                            level=level)
+        self.log.propagate = False
+        self.set_loglevel(level)
+        self.env = None
+        self.level = level
+
+    def set_env(self, env):
+        self.env = env
+
+    @staticmethod
+    def check_level(level):
+        if level.upper() not in Logger.levels:
+            raise ValueError('Invalid log level: {}. Supported levels: ({})'
+                             .format(level, ", ".join(Logger.levels)))
+
+    @staticmethod
+    def get_numeric_level(loglevel):
+        Logger.check_level(loglevel)
+        numeric_level = getattr(logging, loglevel.upper(), Logger.default_level)
+        if not isinstance(numeric_level, int):
+            raise ValueError('Invalid log level: {}'.format(loglevel))
+        return numeric_level
+
+    def set_loglevel(self, loglevel):
+        # assuming loglevel is bound to the string value obtained from the
+        # command line argument. Convert to upper case to allow the user to
+        # specify --log=DEBUG or --log=debug
+        numeric_level = self.get_numeric_level(loglevel)
+
+        for handler in self.log.handlers:
+            handler.setLevel(numeric_level)
+        self.log.setLevel(numeric_level)
+        self.level = loglevel
+
+    def _log(self, level, message, *args, exc_info=False, **kwargs):
+        self.log.log(level, message, *args, exc_info=exc_info, **kwargs)
+
+    def debug(self, message, *args, **kwargs):
+        self._log(logging.DEBUG, message, *args, **kwargs)
+
+    def info(self, message, *args, **kwargs):
+        self._log(logging.INFO, message, *args, **kwargs)
+
+    def warning(self, message, *args, **kwargs):
+        self._log(logging.WARNING, message, *args, **kwargs)
+
+    def warn(self, message, *args, **kwargs):
+        self.warning(message, *args, **kwargs)
+
+    def error(self, message, *args, **kwargs):
+        self._log(logging.ERROR, message, *args, **kwargs)
+
+    def exception(self, message, *args, **kwargs):
+        self._log(logging.ERROR, message, *args, exc_info=True, **kwargs)
+
+    def critical(self, message, *args, **kwargs):
+        self._log(logging.CRITICAL, message, *args, **kwargs)
+
+    def add_handler(self, handler):
+        handler_defined = handler.__class__ in map(lambda h: h.__class__,
+                                                   self.log.handlers)
+
+        if not handler_defined:
+            handler.setLevel(self.level)
+            handler.setFormatter(self.formatter)
+            self.log.addHandler(handler)
+
+
+class FileLogger(Logger):
+
+    def __init__(self, log_file: str, level: str = Logger.default_level):
+        super().__init__(logger_name="{}-File".format(self.PROJECT_NAME),
+                         level=level)
+        self.add_handler(WatchedFileHandler(log_file))
+
+
+class ApexEnvironmentFetcher:
+
+    DEFAULTS = {
+        'logfile': '/home/calipso/log/apex_environment_fetch.log',
+        'mongo_config': '/local_dir/calipso_mongo_access.conf',
+        'config_dir': '/home/calipso/Calipso/app/install/db',
+        'env': 'Apex-Euphrates',
+        'loglevel': 'INFO',
+        'git_repo': 'https://git.opnfv.org/calipso',
+        'root': False
+    }
+
+    USER_NAME = 'calipso'
+    USER_PWD = 'calipso_default'
+    REPO_LOCAL_NAME = 'Calipso'
+    INSTALLER = 'python3 app/install/calipso-installer.py --command start-all'
+    CONFIG_FILE_NAME = 'apex-configuration.conf'
+    ENV_CONFIG_FILE_NAME = 'apex_environment_config.json'
+    OVERCLOUDRC_FILE = 'overcloudrc.v3'
+    SSH_DIR = '/home/calipso/.ssh'
+    SSH_OPTIONS = '-q -o StrictHostKeyChecking=no'
+    UNDERCLOUD_KEY_FILE = 'uc-id_rsa'
+    UNDERCLOUD_PUBLIC_KEY_FILE = '{}/uc-id_rsa.pub'.format(SSH_DIR)
+    OVERCLOUD_USER = 'heat-admin'
+    OVERCLOUD_KEY_FILE = 'oc-id_rsa'
+    MOUNT_SSH_DIR = '/local_dir/.ssh'
+    OVERCLOUD_KEYSTONE_CONF = 'oc-keystone.conf'
+    OVERCLOUD_ML2_CONF = 'overcloud_ml2_conf.ini'
+    OVERCLOUD_RABBITMQ_CONF = 'overcloud_rabbitmq_conf.ini'
+
+    def __init__(self):
+        self.args = self.get_args()
+        self.log = None
+        self.config_file = '{}/{}'.format(self.args.config_dir,
+                                          self.CONFIG_FILE_NAME)
+        self.env_config_file = '{}/{}'.format(self.args.config_dir,
+                                              self.ENV_CONFIG_FILE_NAME)
+        self.undercloud_user = 'root'
+        self.undercloud_host = '192.0.2.1'
+        self.undercloud_key = '{}/{}'.format(self.SSH_DIR,
+                                             self.UNDERCLOUD_KEY_FILE)
+        self.overcloud_config_file = '{}/{}'\
+            .format(self.args.config_dir, self.OVERCLOUDRC_FILE)
+        self.overcloud_key = '{}/{}'.format(self.SSH_DIR,
+                                            self.OVERCLOUD_KEY_FILE)
+        self.overcloud_key_container = '{}/{}'.format(self.MOUNT_SSH_DIR,
+                                                      self.OVERCLOUD_KEY_FILE)
+        self.undercloud_ip = None
+        self.overcloud_ip = None
+        self.conf_lines = {}
+        self.env_config = None
+
+    def get_args(self):
+        # try to read configuration from command line parameters
+        parser = argparse.ArgumentParser()
+        parser.add_argument('-m', '--mongo_config', nargs='?', type=str,
+                            default=self.DEFAULTS['mongo_config'],
+                            help='name of config file ' +
+                                 'with MongoDB server access details\n'
+                                 '(Default: {})'
+                                 .format(self.DEFAULTS['mongo_config']))
+        parser.add_argument('-d', '--config_dir', nargs='?', type=str,
+                            default=self.DEFAULTS['config_dir'],
+                            help='path to directory with config data\n'
+                                 '(Default: {})'
+                                 .format(self.DEFAULTS['config_dir']))
+        parser.add_argument('-a', '--apex', nargs='?', type=str,
+                            help='name or address of the Apex host')
+        parser.add_argument('-e', '--env', nargs='?', type=str,
+                            default=self.DEFAULTS['env'],
+                            help='name of environment to create\n'
+                                 '(Default: {})'
+                                  .format(self.DEFAULTS['env']))
+        parser.add_argument('-l', '--loglevel', nargs='?', type=str,
+                            default=self.DEFAULTS['loglevel'],
+                            help='logging level \n(default: "{}")'
+                            .format(self.DEFAULTS['loglevel']))
+        parser.add_argument('-f', '--logfile', nargs='?', type=str,
+                            default=self.DEFAULTS['logfile'],
+                            help='log file \n(default: "{}")'
+                            .format(self.DEFAULTS['logfile']))
+        parser.add_argument('-g', '--git', nargs='?', type=str,
+                            help='URL to clone Git repository\n(default: {})'
+                            .format(self.DEFAULTS['git_repo']),
+                            default=self.DEFAULTS['git_repo'])
+        parser.add_argument('--root', dest='root', action='store_true')
+        parser.add_argument('--no-root', dest='root', action='store_false')
+        parser.set_defaults(root=False)
+        return parser.parse_args()
+
+    @staticmethod
+    def run_cmd(cmd: str ='', use_sudo=True, as_user=None):
+        sudo_prefix = '' if not use_sudo \
+            else ('sudo -u {} '.format(as_user) if as_user else 'sudo ')
+        command = '{}{}'.format(sudo_prefix, cmd)
+        output = run_command(cmd=command, raise_on_error=True)
+        return output
+
+    def get_undercloud_ip(self):
+        output = self.run_cmd('ifconfig br-admin')
+        lines = output.splitlines()
+        if not lines or len(lines) < 2:
+            self.log.error('Unable to fetch inet address, output: {}'
+                           .format(output))
+            return
+        inet_parts = lines[1].split()
+        inet_address = inet_parts[1]
+        return inet_address
+
+    def get_overcloud_ip(self):
+        with open('{}'.format(self.overcloud_config_file)) as rc_file:
+            lines = rc_file.readlines()
+            no_proxy_line = [l for l in lines if 'no_proxy=' in l]
+            no_proxy_line = no_proxy_line[0]
+            value = no_proxy_line[no_proxy_line.index('=')+2:]
+            parts = value.strip().split(',')
+            inet_address = parts[-1]
+            return inet_address
+
+    def set_ssh_dir(self):
+        self.run_cmd('mkdir -p {}'.format(self.SSH_DIR))
+        # will be used to access undercloud VM
+        self.run_cmd('cp /root/.ssh/id_rsa {}'.format(self.undercloud_key))
+        self.run_cmd('cp /root/.ssh/id_rsa.pub {}'
+                     .format(self.UNDERCLOUD_PUBLIC_KEY_FILE))
+        self.run_cmd('chown calipso.calipso {}/uc-id_rsa*'.format(self.SSH_DIR))
+        self.copy_undercloud_file('/home/stack/.ssh/id_rsa',
+                                  local_dir=self.SSH_DIR,
+                                  local_name=self.OVERCLOUD_KEY_FILE)
+        self.copy_undercloud_file('/home/stack/.ssh/id_rsa.pub',
+                                  local_dir=self.SSH_DIR,
+                                  local_name='oc-id_rsa.pub')
+        self.run_cmd('chown calipso.calipso {}/oc-id_rsa*'.format(self.SSH_DIR))
+
+    def copy_undercloud_file(self, file_path, local_dir=None, local_name=None):
+        cmd = 'scp {} -i {} {}@{}:{} {}/{}' \
+            .format(self.SSH_OPTIONS,
+                    self.undercloud_key,
+                    self.undercloud_user, self.undercloud_host,
+                    file_path,
+                    local_dir if local_dir else self.args.config_dir,
+                    local_name if local_name else '')
+        self.run_cmd(cmd)
+
+    def copy_undercloud_conf_file(self, file_name, local_name=None):
+        self.copy_undercloud_file('/home/stack/{}'.format(file_name),
+                                  local_name=local_name)
+
+    def get_undercloud_setup(self):
+        self.copy_undercloud_conf_file('undercloud.conf')
+        self.copy_undercloud_conf_file('opnfv-environment.yaml')
+        self.copy_undercloud_conf_file('overcloudrc')
+        self.copy_undercloud_conf_file('stackrc')
+        self.copy_undercloud_conf_file('overcloudrc.v3')
+        self.copy_undercloud_conf_file('deploy_command')
+        self.copy_undercloud_conf_file('apex-undercloud-install.log')
+        self.copy_undercloud_conf_file('undercloud-passwords.conf')
+        self.copy_undercloud_file('/etc/keystone/keystone.conf',
+                                  local_name='uc-keystone.conf')
+        self.run_cmd('mkdir -p {}/deploy_logs'.format(self.args.config_dir))
+        self.copy_undercloud_file('/home/stack/deploy_logs/*',
+                                  local_name='deploy_logs/')
+
+    def fetch_conf_file(self, file_name, target_file, lines_property=None):
+        conf = \
+            self.run_cmd('ssh -i {} {} {}@{} '
+                         'sudo grep -v "^#" {}'
+                         .format(self.overcloud_key,
+                                 self.SSH_OPTIONS,
+                                 self.OVERCLOUD_USER,
+                                 self.overcloud_ip,
+                                 file_name))
+        conf_file_path = '{}/{}'.format(self.args.config_dir, target_file)
+        if lines_property:
+            self.conf_lines[lines_property] = conf.splitlines()
+        with open(conf_file_path, 'w') as conf_file:
+            conf_file.write(conf)
+
+    def fetch_keystone_conf(self):
+        self.fetch_conf_file('/etc/keystone/keystone.conf',
+                             self.OVERCLOUD_KEYSTONE_CONF,
+                             lines_property='keystone_conf')
+
+    def fetch_ml2_conf(self):
+        self.fetch_conf_file('/etc/neutron/plugins/ml2/ml2_conf.ini',
+                             self.OVERCLOUD_ML2_CONF,
+                             lines_property='ml2_conf')
+
+    def fetch_rabbitmq_conf(self):
+        self.fetch_conf_file('/etc/rabbitmq/rabbitmq.config',
+                             self.OVERCLOUD_RABBITMQ_CONF,
+                             lines_property='rabbitmq_conf')
+
+    def copy_local_file_to_overcloud(self, local_file, remote_file_path,
+                                     local_dir=None):
+        source_dir = local_dir if local_dir else self.args.config_dir
+        local_file_path = '{}/{}'.format(source_dir, local_file)
+        cmd = 'scp {} -i {} {} {}@{}:{}' \
+            .format(self.SSH_OPTIONS,
+                    self.overcloud_key,
+                    local_file_path,
+                    self.OVERCLOUD_USER, self.overcloud_ip,
+                    remote_file_path)
+        self.run_cmd(cmd)
+
+    def get_overcloud_keys(self):
+        remote_ssh_dir = '/home/{}/.ssh'.format(self.OVERCLOUD_USER)
+        remote_private_key = '{}/id_rsa'.format(remote_ssh_dir)
+        self.copy_local_file_to_overcloud(self.OVERCLOUD_KEY_FILE,
+                                          remote_private_key,
+                                          local_dir=self.SSH_DIR)
+        public_key = '{}.pub'.format(self.OVERCLOUD_KEY_FILE)
+        remote_public_key = '{}/id_rsa.pub'.format(remote_ssh_dir)
+        self.copy_local_file_to_overcloud(public_key, remote_public_key,
+                                          local_dir=self.SSH_DIR)
+
+    def get_overcloud_setup(self):
+        self.get_overcloud_keys()
+        self.fetch_keystone_conf()
+        self.fetch_ml2_conf()
+        self.fetch_rabbitmq_conf()
+
+    def get_value_from_file(self, file_attr, attr, regex=None, separator='='):
+        line_prefix = 'export ' if separator == '=' else ''
+        prefix = '{}{}{}'.format(line_prefix, attr, separator)
+        lines = self.conf_lines.get(file_attr, {})
+        matches = [l for l in lines if l.startswith(prefix)]
+        if not matches:
+            self.log.error('failed to find attribute {}'.format(attr))
+            return ''
+        line = matches[0].strip()
+        value = line[line.index(separator)+len(separator):]
+        if not regex:
+            return value
+        matches = re.search(regex, value)
+        if not matches:
+            return ''
+        match = matches.group(1)
+        return match
+
+    def get_value_from_rc_file(self, lines, attr, regex=None):
+        return self.get_value_from_file(lines, attr, regex=regex)
+
+    def get_api_config(self):
+        with open('{}'.format(self.overcloud_config_file)) as rc_file:
+            self.conf_lines['overcloudrc'] = rc_file.readlines()
+        api_config = {
+            'name': 'OpenStack',
+            'host': self.overcloud_ip,
+            'port': self.get_value_from_rc_file('overcloudrc',
+                                                'OS_AUTH_URL',
+                                                regex=r':(\d+)/'),
+            'user': self.get_value_from_rc_file('overcloudrc', 'OS_USERNAME'),
+            'pwd': self.get_value_from_rc_file('overcloudrc', 'OS_PASSWORD'),
+            'admin_token': self.get_value_from_file('keystone_conf',
+                                                    'admin_token',
+                                                    separator=' = ')
+        }
+        return api_config
+
+    def run_command_on_overcloud(self, cmd):
+        output = \
+            self.run_cmd('ssh -i {} {} {}@{} {}'
+                         .format(self.overcloud_key,
+                                 self.SSH_OPTIONS,
+                                 self.OVERCLOUD_USER,
+                                 self.overcloud_ip,
+                                 shlex.quote(cmd)))
+        return output
+
+    def create_mysql_user(self, host, pwd):
+        mysql_file_name = '/tmp/create_user.sql'
+        # create calipso MySQL user with access from jump host to all tables
+        echo_cmd = "echo \"GRANT ALL PRIVILEGES ON *.* " \
+                   "TO 'calipso'@'{}' " \
+                   "IDENTIFIED BY '{}'; " \
+                   "FLUSH PRIVILEGES;\" > {}"\
+            .format(host, pwd, mysql_file_name)
+        self.run_command_on_overcloud(echo_cmd)
+        run_mysql_cmd = 'sudo mysql < {}'.format(mysql_file_name)
+        self.run_command_on_overcloud(run_mysql_cmd)
+        remove_file_cmd = 'rm {}'.format(mysql_file_name)
+        self.run_command_on_overcloud(remove_file_cmd)
+        return pwd
+
+    def get_mysql_config(self):
+        pwd = self.run_cmd('openssl rand -base64 18').strip()
+        self.create_mysql_user(self.undercloud_ip, pwd)
+        pwd = self.create_mysql_user(self.overcloud_ip, pwd)
+        mysql_config = {
+            'name': 'mysql',
+            'host': self.overcloud_ip,
+            'port': '3306',
+            'user': 'calipso',
+            'pwd': pwd
+        }
+        return mysql_config
+
+    def get_cli_config(self):
+        return {
+            'name': 'CLI',
+            'host': self.overcloud_ip,
+            'user': self.OVERCLOUD_USER,
+            'key': self.overcloud_key_container
+        }
+
+    def get_amqp_config(self):
+        user = self.get_value_from_file('rabbitmq_conf',
+                                        '    {default_user',
+                                        separator=',',
+                                        regex='"(.+)"')
+        pwd = self.get_value_from_file('rabbitmq_conf',
+                                       '    {default_pass',
+                                       separator=',',
+                                       regex='"(.+)"')
+        port = self.get_value_from_file('rabbitmq_conf',
+                                        '    {tcp_listeners',
+                                        separator=',',
+                                        regex=r', (\d+)')
+        port = int(port)
+        return {
+            'name': 'AMQP',
+            'host': self.overcloud_ip,
+            'port': port,
+            'user': user,
+            'pwd': pwd
+        }
+
+    def get_monitoring_config(self):
+        return {
+            'name': 'Monitoring',
+            'config_folder': '/local_dir/sensu_config',
+            'env_type': 'production',
+            'rabbitmq_port': '5671',
+            'rabbitmq_user': 'sensu',
+            'server_ip': self.undercloud_ip,
+            'server_name': 'sensu_server',
+            'type': 'Sensu',
+            'provision': 'None',
+            'ssh_port': '20022',
+            'ssh_user': 'root',
+            'ssh_password': 'osdna',
+            'api_port': 4567,
+            'rabbitmq_pass': 'osdna'
+        }
+
+    def prepare_env_configuration_array(self):
+        config_array = [
+            self.get_api_config(),
+            self.get_mysql_config(),
+            self.get_cli_config(),
+            self.get_amqp_config(),
+            self.get_monitoring_config()
+        ]
+        self.env_config['configuration'] = config_array
+
+    UI_USER = 'wNLeBJxNDyw8G7Ssg'
+
+    def add_env_ui_conf(self):
+        self.env_config.update({
+            'user': self.UI_USER,
+            'auth': {
+                'view-env': [self.UI_USER],
+                'edit-env': [self.UI_USER]
+            }
+        })
+
+    def get_mechanism_driver(self):
+        driver = self.get_value_from_file('ml2_conf', 'mechanism_drivers',
+                                          separator=' =')
+        return 'OVS' if driver == 'openvswitch' else driver
+
+    def set_env_level_attributes(self):
+        self.env_config.update({
+            'distribution': 'Apex',
+            'distribution_version': 'Euphrates',
+            'type_drivers': self.get_value_from_file('ml2_conf',
+                                                     'tenant_network_types',
+                                                     separator=' = '),
+            'mechanism_drivers': [self.get_mechanism_driver()],
+            "operational": "running",
+            "scanned": False,
+            "type": "environment",
+            "app_path": "/home/scan/calipso_prod/app",
+            "listen": True,
+            "enable_monitoring": True,
+            "aci_enabled": False,
+            "last_scanned": "",
+            "monitoring_setup_done": False
+        })
+
+    def prepare_env_config(self):
+        self.prepare_env_configuration_array()
+        self.set_env_level_attributes()
+        self.add_env_ui_conf()
+        config_dump = json.dumps(self.env_config, sort_keys=True, indent=4,
+                                 separators=(',', ': '))
+        with open(self.env_config_file, 'w') as config_file:
+            config_file.write(config_dump)
+
+    def setup_environment_config(self, config_file):
+        self.run_cmd('mkdir -p {}'.format(self.args.config_dir))
+        self.env_config = {'name': self.args.env}
+        self.undercloud_ip = self.get_undercloud_ip()
+        config_file.write('jumphost_admin_ip {}\n'.format(self.undercloud_ip))
+        self.set_ssh_dir()
+        self.get_undercloud_setup()
+        self.overcloud_ip = self.get_overcloud_ip()
+        config_file.write('overcloud_admin_ip {}\n'.format(self.overcloud_ip))
+        self.get_overcloud_setup()
+        # now get correct IP of overcloud from RabbitMQ setup
+        self.overcloud_ip = self.get_value_from_file('rabbitmq_conf',
+                                                     '    {tcp_listeners',
+                                                     regex='"(.*)"',
+                                                     separator=',')
+        self.prepare_env_config()
+
+    def get(self):
+        try:
+            print('Fetching Apex environment settings')
+            self.log = FileLogger(self.args.logfile)
+            self.run_cmd('mkdir -p {}'.format(self.args.config_dir))
+            with open(self.config_file, 'w') as config_file:
+                self.setup_environment_config(config_file)
+            print('Finished fetching Apex environment settings')
+            return True, 'Environment setup finished successfully'
+        except RuntimeError as e:
+            return False, str(e)
+
+
+if __name__ == '__main__':
+    fetcher = ApexEnvironmentFetcher()
+    ret, msg = fetcher.get()
+    if not ret:
+        if fetcher.log:
+            fetcher.log.error(msg)
+        else:
+            print(msg)
+    sys.exit(0 if ret else 1)
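The new fetcher above appears intended to run on the Apex jump host (it reads /root/.ssh/id_rsa and queries the local br-admin bridge). A minimal usage sketch, relying on the script's own defaults where possible (the log-level override is just an example):

    python3 app/install/configure/setup_apex_environment.py \
        -d /home/calipso/Calipso/app/install/db -e Apex-Euphrates -l DEBUG

On success it writes apex-configuration.conf and apex_environment_config.json under the chosen config_dir and exits 0; on failure it exits 1, logging the error to the file given by -f (default /home/calipso/log/apex_environment_fetch.log).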
diff --git a/app/install/db/apex_environment_config.json b/app/install/db/apex_environment_config.json
new file mode 100644 (file)
index 0000000..918cd01
--- /dev/null
@@ -0,0 +1,3 @@
+{
+    "_id" : "apex_environment_config_temp_id"
+}
\ No newline at end of file
diff --git a/app/install/db/api_tokens.json b/app/install/db/api_tokens.json
new file mode 100644 (file)
index 0000000..94cc63a
--- /dev/null
@@ -0,0 +1,7 @@
+{ 
+    "token" : "ec56537a-a448-43f9-b36e-3e2bee44f018", 
+    "issued_at" : "2017-04-06T14:32:17.893797Z", 
+    "expires_at" : "2017-11-11T15:32:17.893769Z", 
+    "lifetime" : "86400", 
+    "method" : "credentials"
+}
diff --git a/app/install/db/connection_tests.json b/app/install/db/connection_tests.json
new file mode 100644 (file)
index 0000000..9965ea3
--- /dev/null
@@ -0,0 +1,156 @@
+[
+{ 
+    "environment" : "My-Environment", 
+    "test_targets" : [
+        "AMQP", 
+        "CLI", 
+        "ACI", 
+        "mysql", 
+        "OpenStack", 
+        "Monitoring"
+    ], 
+    "test_results" : {
+        "AMQP" : false, 
+        "CLI" : true, 
+        "ACI" : false, 
+        "mysql" : false, 
+        "OpenStack" : false, 
+        "Monitoring" : false
+    },
+    "targets_configuration" : 
+    [
+        {
+            "name" : "OpenStack", 
+            "host" : "1.1.1.1", 
+            "port" : "5000", 
+            "user" : "admin", 
+            "pwd" : "admin", 
+            "admin_token" : "tokentoken"
+        }, 
+        {
+            "name" : "ACI", 
+            "host" : "2.2.2.2", 
+            "user" : "admin", 
+            "pwd" : "password"
+        }, 
+        {
+            "name" : "mysql", 
+            "host" : "1.1.1.1", 
+            "port" : "3307", 
+            "user" : "root", 
+            "pwd" : "password"
+        }, 
+        {
+            "name" : "CLI", 
+            "host" : "1.1.1.7", 
+            "user" : "root", 
+            "key" : "/local_dir/ACI-id_rsa"
+        }, 
+        {
+            "name" : "AMQP", 
+            "host" : "1.1.1.1", 
+            "port" : "5673", 
+            "user" : "nova", 
+            "pwd" : "password"
+        }, 
+        {
+            "name" : "Monitoring", 
+            "config_folder" : "/local_dir/sensu_config", 
+            "env_type" : "production", 
+            "rabbitmq_port" : "5671", 
+            "rabbitmq_user" : "sensu", 
+            "server_ip" : "the_sensu_server", 
+            "server_name" : "sensu_server", 
+            "type" : "Sensu", 
+            "provision" : "None", 
+            "ssh_port" : "20022", 
+            "ssh_user" : "root", 
+            "ssh_password" : "osdna", 
+            "api_port" : 4567, 
+            "rabbitmq_pass" : "osdna"
+        }
+    ],  
+    "submit_timestamp" : "2017-05-17T07:53:09.194+0000", 
+    "response_time" : "78ms", 
+    "response_timestamp" : "2017-03-17T11:00:17.939+0000", 
+    "status" : "response", 
+    "last_response_message" : "cli says yes i am alive"
+},
+{ 
+    "environment" : "Mirantis-Liberty", 
+    "test_targets" : [
+        "AMQP", 
+        "CLI", 
+        "ACI", 
+        "mysql", 
+        "OpenStack", 
+        "Monitoring"
+    ],
+    "targets_configuration" : 
+    [
+        {
+            "name" : "OpenStack", 
+            "host" : "1.1.1.1", 
+            "port" : "5000", 
+            "user" : "admin", 
+            "pwd" : "admin", 
+            "admin_token" : "tokentoken"
+        }, 
+        {
+            "name" : "ACI", 
+            "host" : "2.2.2.2", 
+            "user" : "admin", 
+            "pwd" : "password"
+        }, 
+        {
+            "name" : "mysql", 
+            "host" : "1.1.1.1", 
+            "port" : "3307", 
+            "user" : "root", 
+            "pwd" : "password"
+        }, 
+        {
+            "name" : "CLI", 
+            "host" : "1.1.1.7", 
+            "user" : "root", 
+            "key" : "/local_dir/ACI-id_rsa"
+        }, 
+        {
+            "name" : "AMQP", 
+            "host" : "1.1.1.1", 
+            "port" : "5673", 
+            "user" : "nova", 
+            "pwd" : "password"
+        }, 
+        {
+            "name" : "Monitoring", 
+            "config_folder" : "/local_dir/sensu_config", 
+            "env_type" : "production", 
+            "rabbitmq_port" : "5671", 
+            "rabbitmq_user" : "sensu", 
+            "server_ip" : "the_sensu_server", 
+            "server_name" : "sensu_server", 
+            "type" : "Sensu", 
+            "provision" : "None", 
+            "ssh_port" : "20022", 
+            "ssh_user" : "root", 
+            "ssh_password" : "osdna", 
+            "api_port" : 4567, 
+            "rabbitmq_pass" : "osdna"
+        }
+    ],  
+    "test_results" : {
+        "AMQP" : false, 
+        "CLI" : false, 
+        "ACI" : false, 
+        "mysql" : true, 
+        "OpenStack" : false, 
+        "Monitoring" : false
+    }, 
+    "submit_timestamp" : "2017-05-17T07:53:09.194+0000", 
+    "response_time" : "56ms", 
+    "response_timestamp" : "2017-05-17T11:00:17.939+0000", 
+    "status" : "response", 
+    "last_response_message" : "mysql says yes i am alive"
+}
+]
index 2ad8921..15522b7 100644 (file)
         {
             "value" : "10239", 
             "label" : "10239"
+        },
+        {
+            "value" : "10918",
+            "label" : "10918"
         }
     ]
 },
             "value" : "switch"
         }
     ]
+},
+{
+    "name" : "configuration_targets",
+    "data" : [
+        {
+            "label" : "AMQP",
+            "value" : "AMQP"
+        },
+        {
+            "label" : "CLI",
+            "value" : "CLI"
+        },
+        {
+            "label" : "ACI",
+            "value" : "ACI"
+        },
+        {
+            "label" : "mysql",
+            "value" : "mysql"
+        },
+        {
+            "label" : "OpenStack",
+            "value" : "OpenStack"
+        },
+        {
+            "label" : "Monitoring",
+            "value" : "Monitoring"
+        }
+    ]
 }
 ]
index 93971a2..d7157e7 100644 (file)
@@ -7,14 +7,14 @@
             "name" : "OpenStack", 
             "admin_token" : "dummy_token", 
             "user" : "adminuser", 
-            "port" : 5000
+            "port" : "5000"
             "pwd" : "dummy_pwd", 
             "host" : "10.0.0.1"
         }, 
         {
             "name" : "mysql", 
             "pwd" : "dummy_pwd",
-            "port" : 3307
+            "port" : "3307"
             "user" : "mysqluser", 
             "host" : "10.0.0.1"
         }, 
         {
             "name" : "AMQP", 
             "pwd" : "dummy_pwd",
-            "port" : 5673
+            "port" : "5673"
             "user" : "rabbitmquser", 
             "host" : "10.0.0.1"
         }, 
         {
-            "rabbitmq_port" : 5671
+            "rabbitmq_port" : "5671"
             "ssh_user" : "root", 
             "server_name" : "sensu_server", 
             "env_type" : "production", 
             "provision" : "None", 
             "name" : "Monitoring", 
-            "ssh_port" : 20022
+            "ssh_port" : "20022"
             "rabbitmq_pass" : "dummy_pwd", 
             "ssh_password" : "dummy_pwd", 
             "rabbitmq_user" : "sensu", 
index 9a9ddcb..0d507c3 100644 (file)
 [
-{ 
+  {
     "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "10.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mercury", 
-        "distribution_version" : "10239", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : false
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Apex", 
-        "distribution_version" : "Euphrates", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Stratoscale", 
-        "distribution_version" : "2.1.6", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : false, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "6.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "7.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "8.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
+      "distribution" : "Apex",
+      "distribution_version" : ["Euphrates"],
+      "mechanism_drivers" : "OVS",
+      "type_drivers" : "vxlan"
+    },
     "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
     }
-},
-{  
+  },
+  {
     "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "9.1", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
+      "distribution" : "Devstack",
+      "distribution_version" : ["Mitaka"],
+      "mechanism_drivers" : "VPP",
+      "type_drivers" : "vlan"
+    },
     "features" : {
-        "listening" : false, 
-        "scanning" : true, 
-        "monitoring" : true
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
     }
-},
-{  
+  },
+  {
     "environment" : {
-        "distribution" : "RDO", 
-        "distribution_version" : "Mitaka", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
+      "distribution" : "Devstack",
+      "distribution_version" : ["Mitaka"],
+      "mechanism_drivers" : "VPP",
+      "type_drivers" : "vxlan"
+    },
     "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
     }
-},
-{  
+  },
+  {
     "environment" : {
-        "distribution" : "RDO", 
-        "distribution_version" : "Liberty", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
+      "distribution" : "Mercury",
+      "distribution_version" : ["10239"],
+      "mechanism_drivers" : "OVS",
+      "type_drivers" : "vlan"
+    },
     "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : false
     }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "9.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vxlan"
-    }, 
+  },
+  { 
     "features" : {
         "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "9.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
+        "monitoring" : true, 
+        "scanning" : true
     }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
     "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "8.0", 
         "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "6.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "7.0", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Mirantis", 
-        "distribution_version" : "9.1", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "RDO", 
-        "distribution_version" : "Mitaka", 
-        "mechanism_drivers" : "VPP", 
-        "type_drivers" : "vxlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "RDO", 
-        "distribution_version" : "Mitaka", 
-        "mechanism_drivers" : "VPP", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Devstack", 
-        "distribution_version" : "Mitaka", 
-        "mechanism_drivers" : "VPP", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "Devstack", 
-        "distribution_version" : "Mitaka", 
-        "mechanism_drivers" : "VPP", 
-        "type_drivers" : "vxlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "RDO", 
-        "distribution_version" : "Mitaka", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-},
-{  
-    "environment" : {
-        "distribution" : "RDO", 
-        "distribution_version" : "Liberty", 
-        "mechanism_drivers" : "OVS", 
-        "type_drivers" : "vlan"
-    }, 
-    "features" : {
-        "listening" : true, 
-        "scanning" : true, 
-        "monitoring" : true
-    }
-}
+        "type_drivers" : "vlan", 
+        "distribution" : "Mercury", 
+        "distribution_version" : ["10918"]
+    }
+  },
+  {
+    "environment" : {
+      "distribution" : "Mirantis",
+      "distribution_version" : [
+        "6.0",
+        "7.0",
+        "8.0",
+        "9.0",
+        "9.1",
+        "10.0"
+      ],
+      "mechanism_drivers" : "OVS",
+      "type_drivers" : "vlan"
+    },
+    "features" : {
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
+    }
+  },
+  {
+    "environment" : {
+      "distribution" : "Mirantis",
+      "distribution_version" : [
+        "6.0",
+        "7.0",
+        "8.0",
+        "9.0",
+        "9.1",
+        "10.0"
+      ],
+      "mechanism_drivers" : "OVS",
+      "type_drivers" : "vxlan"
+    },
+    "features" : {
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
+    }
+  },
+  {
+    "environment" : {
+      "distribution" : "RDO",
+      "distribution_version" : [
+        "Liberty",
+        "Mitaka"
+      ],
+      "mechanism_drivers" : "OVS",
+      "type_drivers" : "vlan"
+    },
+    "features" : {
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
+    }
+  },
+  {
+    "environment" : {
+      "distribution" : "RDO",
+      "distribution_version" : [
+        "Liberty",
+        "Mitaka"
+      ],
+      "mechanism_drivers" : "OVS",
+      "type_drivers" : "vxlan"
+    },
+    "features" : {
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
+    }
+  },
+  {
+    "environment" : {
+      "distribution" : "RDO",
+      "distribution_version" : ["Mitaka"],
+      "mechanism_drivers" : "VPP",
+      "type_drivers" : "vxlan"
+    },
+    "features" : {
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
+    }
+  },
+  {
+    "environment" : {
+      "distribution" : "RDO",
+      "distribution_version" : ["Mitaka"],
+      "mechanism_drivers" : "VPP",
+      "type_drivers" : "vlan"
+    },
+    "features" : {
+      "listening" : true,
+      "scanning" : true,
+      "monitoring" : true
+    }
+  },
+  {
+    "environment" : {
+      "distribution" : "Stratoscale",
+      "distribution_version" : ["2.1.6"],
+      "mechanism_drivers" : "OVS",
+      "type_drivers" : "vlan"
+    },
+    "features" : {
+      "listening" : false,
+      "scanning" : true,
+      "monitoring" : true
+    }
+  }
 ]
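The supported_environments documents above now carry a list of versions per distribution instead of one combined distribution string. A minimal sketch of the kind of lookup this enables (a hypothetical helper, not the actual InventoryMgr code):

    # Hypothetical sketch only -- not the actual InventoryMgr logic.
    def is_supported(supported_envs, distribution, distribution_version):
        return any(
            entry["environment"]["distribution"] == distribution and
            distribution_version in entry["environment"]["distribution_version"]
            for entry in supported_envs
        )

    # With the documents above, is_supported(docs, "Mirantis", "9.1") is True.

The per-feature flags (listening, scanning, monitoring) would still be read from the matching entry, which is what the tests further below exercise through InventoryMgr.is_feature_supported_in_env.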
diff --git a/app/install/db/user_settings.json b/app/install/db/user_settings.json
new file mode 100644 (file)
index 0000000..ad0e868
--- /dev/null
@@ -0,0 +1,4 @@
+{
+    "messages_view_backward_delta" : 1209600000,
+    "user_id" : "wNLeBJxNDyw8G7Ssg"
+}
index b1798f7..6a4f926 100644 (file)
@@ -1,6 +1,6 @@
 user admin
 password password
-url ldap://korlev-calipso-dev.cisco.com:389
+url ldap://your-server.cisco.com:389
 user_id_attribute CN
 user_pass_attribute userpassword
 user_objectclass inetOrgPerson
index 03c9069..e940054 100644 (file)
@@ -7,6 +7,7 @@
 # which accompanies this distribution, and is available at                    #
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
 ###############################################################################
+import datetime
 from typing import Union
 
 from bson import ObjectId
@@ -26,9 +27,9 @@ class Message:
                  display_context: Union[str, ObjectId] = None,
                  level: str = DEFAULT_LEVEL,
                  object_type: str = None,
-                 ts: str = None,
-                 received_ts: str = None,
-                 finished_ts: str = None):
+                 ts: datetime.datetime = None,
+                 received_ts: datetime.datetime = None,
+                 finished_ts: datetime.datetime = None):
         super().__init__()
 
         if level and level.lower() in self.LEVELS:
index a299076..1436a46 100644 (file)
@@ -89,10 +89,9 @@ class MonitoringCheckHandler(SpecialCharConverter):
         level = error_level if error_level\
             else ERROR_LEVEL[check_result['status']]
         dt = datetime.datetime.utcfromtimestamp(check_result['executed'])
-        ts = stringify_datetime(dt)
         message = Message(msg_id=msg_id, env=self.env, source=SOURCE_SYSTEM,
                           object_id=obj_id, object_type=obj_type,
                           display_context=display_context, level=level,
-                          msg=check_result, ts=ts)
+                          msg=check_result, ts=dt)
         collection = self.inv.collections['messages']
         collection.insert_one(message.get())
index 7002ed7..6356f06 100644 (file)
@@ -72,12 +72,21 @@ class TestEnvironmentConfigs(TestBase):
                                   },
                                   expected_code=base.BAD_REQUEST_CODE)
 
+    def test_get_environment_configs_list_with_wrong_distribution_version(self):
+        self.validate_get_request(environment_configs.URL,
+                                  params={
+                                      "distribution_version":
+                                          environment_configs.WRONG_DIST_VER
+                                  },
+                                  expected_code=base.BAD_REQUEST_CODE)
+
     @patch(base.RESPONDER_BASE_READ)
     def test_get_environment_configs_list_with_distribution(self, read):
         self.validate_get_request(environment_configs.URL,
                                   params={
                                       "distribution":
-                                          environment_configs.CORRECT_DISTRIBUTION
+                                          environment_configs.
+                                          CORRECT_DISTRIBUTION
                                   },
                                   mocks={
                                       read: environment_configs.
@@ -377,11 +386,12 @@ class TestEnvironmentConfigs(TestBase):
 
     def mock_validate_env_config_with_supported_envs(self, scanning,
                                                      monitoring, listening):
-        InventoryMgr.is_feature_supported_in_env = lambda self, matches, feature: {
-            EnvironmentFeatures.SCANNING: scanning,
-            EnvironmentFeatures.MONITORING: monitoring,
-            EnvironmentFeatures.LISTENING: listening
-        }[feature]
+        InventoryMgr.is_feature_supported_in_env = \
+            lambda self, matches, feature: {
+                EnvironmentFeatures.SCANNING: scanning,
+                EnvironmentFeatures.MONITORING: monitoring,
+                EnvironmentFeatures.LISTENING: listening
+            }[feature]
 
     @patch(base.RESPONDER_BASE_WRITE)
     def test_post_environment_config(self, write):
index d320340..b99d5bb 100644 (file)
@@ -53,7 +53,9 @@ WRONG_ENV_TYPE = ""
 CORRECT_ENV_TYPE = "development"
 
 WRONG_DISTRIBUTION = "wrong-environment"
-CORRECT_DISTRIBUTION = "Mirantis-6.0"
+WRONG_DIST_VER = "wrong-environment"
+CORRECT_DISTRIBUTION = "Mirantis"
+CORRECT_DIST_VER = "6.0"
 
 WRONG_OBJECT_ID = "58a2406e6a283a8bee15d43"
 CORRECT_OBJECT_ID = "58a2406e6a283a8bee15d43f"
@@ -150,11 +152,8 @@ CONSTANTS_BY_NAMES = {
         "production"
     ],
     "distributions": [
-        "Mirantis-6.0",
-        "Mirantis-7.0",
-        "Mirantis-8.0",
-        "Mirantis-9.0",
-        "RDO-Juno"
+        "Mirantis",
+        "RDO"
     ],
     "environment_operational_status": [
         "stopped",
index a9e8885..4cea105 100644 (file)
@@ -15,7 +15,9 @@ URL = "/environment_configs"
 NAME = "Mirantis-Liberty-API"
 UNKNOWN_NAME = "UNKNOWN NAME"
 WRONG_DISTRIBUTION = base.WRONG_DISTRIBUTION
+WRONG_DIST_VER = base.WRONG_DIST_VER
 CORRECT_DISTRIBUTION = base.CORRECT_DISTRIBUTION
+CORRECT_DIST_VER = base.CORRECT_DIST_VER
 WRONG_MECHANISM_DRIVER = base.WRONG_MECHANISM_DRIVER
 CORRECT_MECHANISM_DRIVER = base.CORRECT_MECHANISM_DRIVER
 WRONG_TYPE_DRIVER = base.WRONG_TYPE_DRIVER
@@ -29,11 +31,13 @@ BOOL_LISTEN = BOOL_SCANNED = \
 
 ENV_CONFIGS = [
     {
-        "distribution": "Mirantis-8.0",
+        "distribution": "Mirantis",
+        "distribution_version": "8.0",
         "name": "Mirantis-Liberty-API"
     },
     {
-        "distribution": "Mirantis-9.0",
+        "distribution": "Mirantis",
+        "distribution_version": "9.0",
         "name": "Mirantis-Liberty"
     }
 ]
@@ -44,7 +48,8 @@ ENV_CONFIGS_RESPONSE = {
 
 ENV_CONFIGS_WITH_SPECIFIC_NAME = [
     {
-        "distribution": "Mirantis-8.0",
+        "distribution": "Mirantis",
+        "distribution_version": "8.0",
         "name": NAME
     }
 ]
@@ -52,10 +57,12 @@ ENV_CONFIGS_WITH_SPECIFIC_NAME = [
 ENV_CONFIGS_WITH_SPECIFIC_DISTRIBUTION = [
     {
         "distribution": CORRECT_DISTRIBUTION,
+        "distribution_version": CORRECT_DIST_VER,
         "name": "Mirantis-Liberty-API",
     },
     {
         "distribution": CORRECT_DISTRIBUTION,
+        "distribution_version": CORRECT_DIST_VER,
         "name": "Mirantis-Liberty"
     }
 ]
@@ -206,7 +213,8 @@ ENV_CONFIG = {
             "type": "Sensu"
         }
     ],
-    "distribution": "Mirantis-8.0",
+    "distribution": "Mirantis",
+    "distribution_version": "8.0",
     "last_scanned": "2017-03-16T11:14:54Z",
     "listen": True,
     "mechanism_drivers": [
index c126b2b..33185ec 100644 (file)
@@ -84,8 +84,12 @@ class TestBase(TestCase):
                               expected_code,
                               expected_response)
 
-    def get_updated_data(self, original_data, deleted_keys=[], updates={}):
+    def get_updated_data(self, original_data, deleted_keys=None, updates=None):
         copy_data = copy.deepcopy(original_data)
+        if deleted_keys is None:
+            deleted_keys = []
+        if updates is None:
+            updates = {}
 
         for key in deleted_keys:
             del copy_data[key]
index 04a1982..542f84e 100644 (file)
@@ -25,7 +25,7 @@ class TestInterfaceAdd(TestEvent):
     def get_by_id(self, env, object_id):
         interface = self.values["payload"]["router_interface"]
         host_id = self.values["publisher_id"].replace("network.", "", 1)
-        router_id = encode_router_id(host_id, interface['id'])
+        router_id = encode_router_id(interface['id'])
 
         if object_id == host_id:
             return HOST
index e416be4..7c3684a 100644 (file)
@@ -34,8 +34,7 @@ class TestInterfaceDelete(TestEvent):
         self.payload = self.values['payload']
         self.interface = self.payload['router_interface']
         self.port_id = self.interface['port_id']
-        self.host_id = self.values["publisher_id"].replace("network.", "", 1)
-        self.router_id = encode_router_id(self.host_id, self.interface['id'])
+        self.router_id = encode_router_id(self.interface['id'])
 
         port_delete_mock = port_delete_class_mock.return_value
         port_delete_mock.delete_port.return_value = EventResult(result=True)
index 03be8df..b450cf5 100644 (file)
@@ -45,7 +45,7 @@ class TestRouterAdd(TestEvent):
         self.router = self.payload['router']
         self.network_id = self.router['external_gateway_info']['network_id']
         self.host_id = self.values["publisher_id"].replace("network.", "", 1)
-        self.router_id = encode_router_id(self.host_id, self.router['id'])
+        self.router_id = encode_router_id(self.router['id'])
 
         self.inv.get_by_id.side_effect = self.get_by_id
 
index 390bd6e..93f44a3 100644 (file)
@@ -36,7 +36,7 @@ class TestRouterUpdate(TestEvent):
         self.payload = self.values['payload']
         self.router = self.payload['router']
         self.host_id = self.values['publisher_id'].replace("network.", "", 1)
-        self.router_id = encode_router_id(self.host_id, self.router['id'])
+        self.router_id = encode_router_id(self.router['id'])
         self.gw_port_id = ROUTER_DOCUMENT['gw_port_id']
 
         scanner_mock = scanner_class_mock.return_value
index cca43be..8e7eb5d 100644 (file)
@@ -41,7 +41,8 @@ CONFIGURATIONS = {
             "pwd": "NF2nSv3SisooxPkCTr8fbfOa"
         }
     ],
-    "distribution": "Mirantis-8.0",
+    "distribution": "Mirantis",
+    "distribution_version": "8.0",
     "last_scanned:": "5/8/16",
     "name": "Mirantis-Liberty-Xiaocong",
     "network_plugins": [
index d32e1ed..1d14450 100644 (file)
 import time
 
 from discover.fetchers.cli.cli_access import CliAccess
+from discover.configuration import Configuration
+from test.fetch.api_fetch.test_data.configurations import CONFIGURATIONS
 from test.fetch.cli_fetch.test_data.cli_access import *
 from test.fetch.test_fetch import TestFetch
-from unittest.mock import MagicMock, patch
-from utils.ssh_conn import SshConn
+from unittest.mock import MagicMock
 
 
 class TestCliAccess(TestFetch):
@@ -22,6 +23,11 @@ class TestCliAccess(TestFetch):
         super().setUp()
         self.configure_environment()
         self.cli_access = CliAccess()
+        self.conf = Configuration()
+        self.cli_access.configuration = self.conf
+        self.conf.use_env = MagicMock()
+        self.conf.environment = CONFIGURATIONS
+        self.conf.configuration = CONFIGURATIONS["configuration"]
 
     def check_run_result(self, is_gateway_host,
                          enable_cache,
@@ -40,7 +46,8 @@ class TestCliAccess(TestFetch):
         self.ssh_conn.exec.return_value = exec_result
         self.ssh_conn.is_gateway_host.return_value = is_gateway_host
         result = self.cli_access.run(COMMAND, COMPUTE_HOST_ID,
-                                     on_gateway=False, enable_cache=enable_cache)
+                                     on_gateway=False,
+                                     enable_cache=enable_cache)
         self.assertEqual(result, expected_result, err_msg)
 
         # reset the cached commands after testing
index 71efd3b..10c67e1 100644 (file)
@@ -7,19 +7,39 @@
 # which accompanies this distribution, and is available at                    #
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
 ###############################################################################
+
+
+def require_open(method):
+    def wrapped(self, *args, **kwargs):
+        if self.closed:
+            raise ValueError("Cursor is closed")
+        return method(self, *args, **kwargs)
+    return wrapped
+
+
 class MockCursor:
 
     def __init__(self, result):
         self.result = result
         self.current = 0
+        self.closed = False
 
+    @require_open
     def __next__(self):
         if self.current < len(self.result):
-            next = self.result[self.current]
+            nxt = self.result[self.current]
             self.current += 1
-            return next
+            return nxt
         else:
             raise StopIteration
 
+    @require_open
     def __iter__(self):
         return self
+
+    @require_open
+    def fetchall(self):
+        return self.result
+
+    def close(self):
+        self.closed = True
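A minimal usage sketch of the extended mock (not part of the patch itself; row values are made up) — once close() is called, the require_open guard rejects any further cursor access:

    cursor = MockCursor([{'id': 1}, {'id': 2}])
    rows = list(cursor)       # __iter__/__next__ walk the canned results in order
    assert rows == [{'id': 1}, {'id': 2}]
    assert cursor.fetchall() == [{'id': 1}, {'id': 2}]
    cursor.close()
    try:
        cursor.fetchall()     # raises: the decorator sees self.closed == True
    except ValueError:
        pass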
index 6188ddf..b6d344c 100644 (file)
@@ -37,13 +37,13 @@ NETWORK_AGENT = [
 NETWORK_AGENT_WITH_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
     {
         'configurations': {},
-        'id': 'OVS-1764430c-c09e-4717-86fa-c04350b1fcbb',
+        'id': 'neutron-openvswitch-agent-1764430c-c09e-4717-86fa-c04350b1fcbb',
         'binary': 'neutron-openvswitch-agent',
         'name': 'neutron-openvswitch-agent'
     },
     {
         'configurations': {},
-        'id': 'OVS-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
+        'id': 'neutron-dhcp-agent-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
         'binary': 'neutron-dhcp-agent',
         'name': 'neutron-dhcp-agent'
     }
@@ -52,13 +52,13 @@ NETWORK_AGENT_WITH_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
 NETWORK_AGENT_WITHOUT_MECHANISM_DRIVERS_IN_CONFIG_RESULTS = [
     {
         'configurations': {},
-        'id': 'network_agent-1764430c-c09e-4717-86fa-c04350b1fcbb',
+        'id': 'neutron-openvswitch-agent-1764430c-c09e-4717-86fa-c04350b1fcbb',
         'binary': 'neutron-openvswitch-agent',
         'name': 'neutron-openvswitch-agent'
     },
     {
         'configurations': {},
-        'id': 'network_agent-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
+        'id': 'neutron-dhcp-agent-2c2ddfee-91f9-47da-bd65-aceecd998b7c',
         'binary': 'neutron-dhcp-agent',
         'name': 'neutron-dhcp-agent'
     }
index a5bc63d..2bd1784 100644 (file)
@@ -34,10 +34,12 @@ VEDGE_WITHOUT_TUNNEL_TYPES = {
     }
 }
 NON_ICEHOUSE_CONFIGS = {
-    "distribution": "Mirantis-8.0"
+    "distribution": "Mirantis",
+    "distribution_version": "8.0"
 }
 ICEHOUSE_CONFIGS = {
-    "distribution": "Canonical-icehouse"
+    "distribution": "Canonical",
+    "distribution_version": "icehouse"
 }
 HOST = {
     "host": "node-5.cisco.com",
index 818704c..c1f9d4f 100644 (file)
@@ -166,3 +166,58 @@ DOC_TO_GET_OVERLAY = {
     "agent_type": "Open vSwitch agent",
     "configurations": {"tunneling_ip": "192.168.2.3"},
 }
+
+LIST_IFACES_LINES = [
+    "eth0",
+    "p",
+    "t"
+]
+LIST_IFACES_NAMES = LIST_IFACES_LINES
+LIST_IFACES_LINES_MIRANTIS = [
+    "eth0--br-eth0",
+    "phy-eth0"
+]
+LIST_IFACES_NAMES_MIRANTIS = ["eth0"]
+
+VEDGE_CONFIGURATIONS_MIRANTIS = {
+    "bridge_mappings": {
+        "br-prv": "eth0"
+    }
+}
+VEDGE_CONFIGURATIONS = {
+    "bridge_mappings": {
+        "physnet1": "eth0",
+        "physnet2": "p",
+        "physnet3": "t",
+        "physnet4": "p",
+        "physnet5": "p"
+    }
+}
+
+VEDGE_MIRANTIS = {
+    'host': HOST['host'],
+    'ports': {
+        "eth0": {"name": "eth0", "id": "eth0-port_id"}
+    },
+    'configurations': VEDGE_CONFIGURATIONS_MIRANTIS
+}
+VEDGE = {
+    'host': HOST['host'],
+    'ports': {
+        "eth0": {"name": "eth0", "id": "eth0-port_id"},
+        "p": {"name": "p", "id": "p-port_id"},
+        "t": {"name": "t", "id": "t-port_id"}
+    },
+    'configurations': VEDGE_CONFIGURATIONS
+}
+
+ANOTHER_DIST = "another distribution"
+
+PNICS_MIRANTIS = {
+    "eth0": {"name": "eth0", "mac_address": "eth0 mac_address"}
+}
+PNICS = {
+    "eth0": {"name": "eth0", "mac_address": "eth0 mac_address"},
+    "p": {"name": "p", "mac_address": "p mac_address"},
+    "t": {"name": "t", "mac_address": "t mac_address"}
+}
index 7d29622..a161e03 100644 (file)
@@ -32,11 +32,13 @@ class TestDbFetchOteps(TestFetch):
         original_get_vconnector = self.fetcher.get_vconnector
         self.fetcher.get_vconnector = MagicMock()
         self.fetcher.inv.get_by_id = MagicMock(side_effect=[vedge, host])
+        original_get_env_config = self.fetcher.config.get_env_config
         self.fetcher.config.get_env_config = MagicMock(return_value=config)
         self.fetcher.get_objects_list_for_id = MagicMock(return_value=oteps_from_db)
         results = self.fetcher.get(VEDGE_ID)
         self.assertEqual(results, expected_results, err_msg)
         self.fetcher.get_vconnector = original_get_vconnector
+        self.fetcher.config.get_env_config = original_get_env_config
 
     def test_get(self):
         test_cases = [
index 0cfb500..9916e5d 100644 (file)
@@ -7,6 +7,8 @@
 # which accompanies this distribution, and is available at                    #
 # http://www.apache.org/licenses/LICENSE-2.0                                  #
 ###############################################################################
+import copy
+
 from discover.fetchers.db.db_fetch_vedges_ovs import DbFetchVedgesOvs
 from test.fetch.test_fetch import TestFetch
 from test.fetch.db_fetch.test_data.db_fetch_vedges_ovs import *
@@ -20,6 +22,12 @@ class TestDbFetchVedgesOvs(TestFetch):
         self.configure_environment()
         self.fetcher = DbFetchVedgesOvs()
         self.fetcher.set_env(self.env)
+        self.original_inv_set = self.fetcher.inv.set
+        self.fetcher.inv.set = MagicMock()
+
+    def tearDown(self):
+        super().tearDown()
+        self.fetcher.inv.set = self.original_inv_set
 
     def check_get_result(self,
                          objects_from_db, host,
@@ -32,7 +40,8 @@ class TestDbFetchVedgesOvs(TestFetch):
         original_fetch_ports = self.fetcher.fetch_ports
         original_get_overlay_tunnels = self.fetcher.get_overlay_tunnels
 
-        self.fetcher.get_objects_list_for_id = MagicMock(return_value=objects_from_db)
+        self.fetcher.get_objects_list_for_id = \
+            MagicMock(return_value=objects_from_db)
         self.fetcher.inv.get_by_id = MagicMock(return_value=host)
         self.fetcher.run_fetch_lines = MagicMock(return_value=vsctl_lines)
         self.fetcher.fetch_ports = MagicMock(return_value=ports)
@@ -96,7 +105,7 @@ class TestDbFetchVedgesOvs(TestFetch):
         results = self.fetcher.fetch_ports_from_dpctl(HOST['id'])
         self.fetcher.run_fetch_lines = original_run_fetch_lines
         self.assertEqual(results, DPCTL_RESULTS,
-                         "Can' t get correct ports info from dpctl lines")
+                         "Can't get correct ports info from dpctl lines")
 
     def test_fetch_port_tags_from_vsctl(self):
         ports = self.fetcher.fetch_port_tags_from_vsctl(VSCTL_LINES,
@@ -108,3 +117,80 @@ class TestDbFetchVedgesOvs(TestFetch):
         results = self.fetcher.get_overlay_tunnels(DOC_TO_GET_OVERLAY,
                                                    VSCTL_LINES)
         self.assertEqual(results, TUNNEL_PORTS)
+
+    @staticmethod
+    def get_test_pnic_for_interface_mirantis(search: dict,
+                                             get_single: bool=True):
+        if not get_single:
+            # we're only supposed to get calls with get_single == True
+            return []
+        return PNICS_MIRANTIS.get(search.get('name'), {})
+
+    @staticmethod
+    def get_test_pnic_for_interface(search: dict,
+                                    get_single: bool=True):
+        if not get_single:
+            # we're only supposed to get calls with get_single == True
+            return []
+        return PNICS.get(search.get('name'), {})
+
+    @staticmethod
+    def get_expected_results_for_get_pnics(test_pnics: dict, ports: dict,
+                                           ifaces_names: list) -> dict:
+        expected_results = {}
+        for p in test_pnics.values():
+            if p.get("name") not in ifaces_names:
+                continue
+            p1 = copy.deepcopy(p)
+            name = p1["name"]
+            port = ports[name]
+            p1["port_id"] = port["id"]
+            expected_results[name] = p1
+        return expected_results
+
+    def test_get_pnics(self):
+        expected_results = \
+            self.get_expected_results_for_get_pnics(PNICS_MIRANTIS,
+                                                    VEDGE_MIRANTIS["ports"],
+                                                    LIST_IFACES_NAMES_MIRANTIS)
+        self.check_get_pnics_for_dist(VEDGE_MIRANTIS,
+                                      LIST_IFACES_LINES_MIRANTIS,
+                                      LIST_IFACES_NAMES_MIRANTIS,
+                                      expected_results,
+                                      self.get_test_pnic_for_interface_mirantis,
+                                      self.fetcher.MIRANTIS_DIST,
+                                      ver="6.0",
+                                      msg="Incorrect get_pnics result "
+                                          "(Mirantis)")
+        expected_results = \
+            self.get_expected_results_for_get_pnics(PNICS,
+                                                    VEDGE["ports"],
+                                                    LIST_IFACES_NAMES)
+        self.check_get_pnics_for_dist(VEDGE,
+                                      LIST_IFACES_LINES,
+                                      LIST_IFACES_NAMES,
+                                      expected_results,
+                                      self.get_test_pnic_for_interface,
+                                      ANOTHER_DIST,
+                                      msg="Incorrect get_pnics result")
+
+    def check_get_pnics_for_dist(self, test_vedge,
+                                 ifaces_list_output, ifaces_list_clear,
+                                 expected_results,
+                                 pnic_find_func,
+                                 dist, ver=None, msg=None):
+        self.fetcher.configuration.environment = {
+            "distribution": dist,
+            "distribution_version": ver
+        }
+        original_run_fetch_lines = self.fetcher.run_fetch_lines
+        self.fetcher.run_fetch_lines = \
+            MagicMock(return_value=ifaces_list_output)
+        original_find_items = self.fetcher.inv.find_items
+        self.fetcher.inv.find_items = pnic_find_func
+        vedge = copy.deepcopy(test_vedge)
+        results = self.fetcher.get_pnics(vedge)
+        self.fetcher.run_fetch_lines = original_run_fetch_lines
+        self.fetcher.inv.find_items = original_find_items
+        self.assertTrue(vedge.get("pnic") in ifaces_list_clear)
+        self.assertEqual(results, expected_results, msg)
index 59ad649..96dbc23 100644 (file)
@@ -58,7 +58,8 @@ CONFIGURATIONS = {
             "type": "Sensu"
         }
     ],
-    "distribution": "Mirantis-8.0",
+    "distribution": "Mirantis",
+    "distribution_version": "8.0",
     "last_scanned:": "5/8/16",
     "name": "Mirantis-Liberty-Nvn",
     "mechanism_drivers": [
index 36c2033..23838aa 100644 (file)
@@ -120,7 +120,8 @@ CONFIGURATIONS = {
             "pwd": "NF2nSv3SisooxPkCTr8fbfOa"
         }
     ],
-    "distribution": "Mirantis-8.0",
+    "distribution": "Mirantis",
+    "distribution_version": "8.0",
     "last_scanned:": "5/8/16",
     "name": "Mirantis-Liberty-Nvn",
     "mechanism_drivers": [
@@ -330,7 +331,8 @@ CONFIGURATIONS_WITHOUT_MECHANISM_DRIVERS = {
             "pwd": "NF2nSv3SisooxPkCTr8fbfOa"
         }
     ],
-    "distribution": "Mirantis-8.0",
+    "distribution": "Mirantis",
+    "distribution_version": "8.0",
     "last_scanned:": "5/8/16",
     "name": "Mirantis-Liberty-Nvn",
     "operational": "yes",
diff --git a/app/test/utils/test_cli_dist_translator.py b/app/test/utils/test_cli_dist_translator.py
new file mode 100644 (file)
index 0000000..e6a8080
--- /dev/null
@@ -0,0 +1,38 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
+# and others                                                                  #
+#                                                                             #
+# All rights reserved. This program and the accompanying materials            #
+# are made available under the terms of the Apache License, Version 2.0       #
+# which accompanies this distribution, and is available at                    #
+# http://www.apache.org/licenses/LICENSE-2.0                                  #
+###############################################################################
+import unittest
+
+from utils.cli_dist_translator import CliDistTranslator
+
+
+class TestCliDistTranslator(unittest.TestCase):
+
+    MERCURY_DIST = 'Mercury'
+    MERCURY_VER = '10239'
+
+    SOURCE_TEXT = 'some text'
+    IP_LINK_TEXT = 'ip link show'
+    IP_LINK_TRANSLATED_MERCURY = \
+        'docker exec --user root ovs_vswitch_10239 ip link show'
+
+    def test_unknown_dist(self):
+        translator = CliDistTranslator('UNKNOWN')
+        result = translator.translate(self.SOURCE_TEXT)
+        self.assertEqual(result, self.SOURCE_TEXT,
+                         'unknown dist should not cause translation')
+
+    def test_mercury_dist(self):
+        translator = CliDistTranslator(self.MERCURY_DIST, self.MERCURY_VER)
+        result = translator.translate(self.SOURCE_TEXT)
+        self.assertEqual(result, self.SOURCE_TEXT,
+                         'known dist should not translate unrelated texts')
+        result = translator.translate(self.IP_LINK_TEXT)
+        self.assertEqual(result, self.IP_LINK_TRANSLATED_MERCURY,
+                         'incorrect translation of command for mercury dist')
diff --git a/app/utils/cli_dist_translator.py b/app/utils/cli_dist_translator.py
new file mode 100644 (file)
index 0000000..4073bb2
--- /dev/null
@@ -0,0 +1,59 @@
+###############################################################################
+# Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems)   #
+# and others                                                                  #
+#                                                                             #
+# All rights reserved. This program and the accompanying materials            #
+# are made available under the terms of the Apache License, Version 2.0       #
+# which accompanies this distribution, and is available at                    #
+# http://www.apache.org/licenses/LICENSE-2.0                                  #
+###############################################################################
+
+class CliDistTranslator:
+
+    DOCKER_CALL = 'docker exec --user root'
+
+    TRANSLATIONS = {
+        # special handling of cli commands in Mercury environments
+        'Mercury': {
+            'ip netns list':
+                '{docker_call} neutron_l3_agent_{version} {cmd};;;'
+                '{docker_call} neutron_dhcp_agent_{version} {cmd}',
+            'ip netns exec qdhcp':
+                '{docker_call} neutron_dhcp_agent_{version} {cmd}',
+            'ip netns exec qrouter':
+                '{docker_call} neutron_l3_agent_{version} {cmd}',
+            'virsh': '{docker_call} novalibvirt_{version} {cmd}',
+            'ip link': '{docker_call} ovs_vswitch_{version} {cmd}',
+            'ip -d link': '{docker_call} ovs_vswitch_{version} {cmd}',
+            'bridge fdb show': '{docker_call} ovs_vswitch_{version} {cmd}',
+            'brctl': '{docker_call} ovs_vswitch_{version} {cmd}',
+            'ovs-vsctl': '{docker_call} ovs_vswitch_{version} {cmd}',
+            'ovs-dpctl': '{docker_call} ovs_vswitch_{version} {cmd}'
+        }
+    }
+
+    def __init__(self, dist: str, dist_version: str=''):
+        self.translation = self.TRANSLATIONS.get(dist, {})
+        self.dist_version = dist_version
+
+    def translate(self, command_to_translate: str) -> str:
+        for command in self.translation.keys():
+            if command in command_to_translate:
+                return self.command_translation(command_to_translate,
+                                                command)
+        return command_to_translate
+
+    def command_translation(self, command_to_translate: str,
+                            translation_key: str) -> str:
+        cmd_translation = self.translation.get(translation_key)
+        if not cmd_translation:
+            return command_to_translate
+        translation_dict = {
+            'docker_call': self.DOCKER_CALL,
+            'version': self.dist_version,
+            'cmd': translation_key
+        }
+        cmd_translation = cmd_translation.format(**translation_dict)
+        cmd_translation = command_to_translate.replace(translation_key,
+                                                       cmd_translation)
+        return cmd_translation
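A hedged usage sketch of the translator above (the container suffix is the same made-up value used in the unit test); commands without a Mercury mapping pass through unchanged:

    translator = CliDistTranslator('Mercury', dist_version='10239')
    translator.translate('ovs-vsctl show')
    # -> 'docker exec --user root ovs_vswitch_10239 ovs-vsctl show'
    translator.translate('uname -a')
    # -> 'uname -a'  (no matching key, returned as-is)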
index 257b0e3..77c1165 100644 (file)
@@ -77,13 +77,16 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
         self.set_collection("clique_constraints")
         self.set_collection("cliques")
         self.set_collection("monitoring_config")
-        self.set_collection("constants", use_default_name=True)
         self.set_collection("scans")
         self.set_collection("messages")
-        self.set_collection("monitoring_config_templates",
-                            use_default_name=True)
         self.set_collection("environments_config")
         self.set_collection("supported_environments")
+        self.set_collection("constants",
+                            use_default_name=True)
+        self.set_collection("monitoring_config_templates",
+                            use_default_name=True)
+        self.set_collection("api_tokens",
+                            use_default_name=True)
 
     def clear(self, scan_plan):
         if scan_plan.inventory_only:
@@ -348,9 +351,13 @@ class InventoryMgr(MongoAccess, metaclass=Singleton):
             if isinstance(env_config['mechanism_drivers'], list) \
             else env_config['mechanism_drivers']
 
-        full_env = {'environment.distribution': env_config['distribution'],
-                    'environment.type_drivers': env_config['type_drivers'],
-                    'environment.mechanism_drivers': mechanism_driver}
+        full_env = {
+            'environment.distribution': env_config['distribution'],
+            'environment.distribution_version':
+                {"$in": [env_config['distribution_version']]},
+            'environment.type_drivers': env_config['type_drivers'],
+            'environment.mechanism_drivers': mechanism_driver
+        }
         return self.is_feature_supported_in_env(full_env, feature)
 
     def is_feature_supported_in_env(self, env_def: dict,
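With the distribution split, the supported-environments lookup now builds a query of roughly this shape (a sketch with invented values; distribution_version is matched with $in, so a record may list several supported versions):

    full_env = {
        'environment.distribution': 'Mirantis',
        'environment.distribution_version': {'$in': ['8.0']},
        'environment.type_drivers': 'vxlan',
        'environment.mechanism_drivers': 'OVS',
    }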
index b69270e..ffb6f85 100644 (file)
@@ -44,10 +44,9 @@ class MongoLoggingHandler(logging.Handler):
         # make ID from current timestamp
         now = datetime.datetime.utcnow()
         d = now - datetime.datetime(1970, 1, 1)
-        ts = stringify_datetime(now)
         timestamp_id = '{}.{}.{}'.format(d.days, d.seconds, d.microseconds)
         source = self.SOURCE_SYSTEM
         message = Message(msg_id=timestamp_id, env=self.env, source=source,
-                          msg=Logger.formatter.format(record), ts=ts,
+                          msg=Logger.formatter.format(record), ts=now,
                           level=record.levelname)
         self.inv.collections['messages'].insert_one(message.get())
\ No newline at end of file
index d39794f..d4599f1 100644 (file)
@@ -86,7 +86,7 @@ class MongoAccess(DictNamingConverter):
         self.prepare_connect_uri()
         MongoAccess.client = MongoClient(
             self.connect_params["server"],
-            self.connect_params["port"]
+            int(self.connect_params["port"])
         )
         MongoAccess.db = getattr(MongoAccess.client,
                                  config_params.get('auth_db', self.DB_NAME))
index 385dea7..ae7b518 100644 (file)
@@ -147,8 +147,8 @@ def setup_args(args: dict,
     return dict(defaults, **args)
 
 
-def encode_router_id(host_id: str, uuid: str):
-    return '-'.join([host_id, 'qrouter', uuid])
+def encode_router_id(uuid: str):
+    return '-'.join(['qrouter', uuid])
 
 
 def decode_router_id(router_id: str):
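Router ids no longer embed the host id; an illustration with a made-up UUID:

    encode_router_id('1764430c-c09e-4717-86fa-c04350b1fcbb')
    # -> 'qrouter-1764430c-c09e-4717-86fa-c04350b1fcbb'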
index 4e1c40a..f6c271c 100644 (file)
@@ -11,6 +11,7 @@ import { SimpleSchema } from 'meteor/aldeed:simple-schema';
 import * as R from 'ramda';
 import { Roles } from 'meteor/alanning:roles';
 import { Environments } from '/imports/api/environments/environments';
+import { UserSettings } from '/imports/api/user-settings/user-settings';
 
 let userSchema = new SimpleSchema({
   _id: { type: String },
@@ -48,6 +49,12 @@ export const insert = new ValidatedMethod({
 
     addRole(viewEnvs, 'view-env', userId);
     addRole(editEnvs, 'edit-env', userId);
+    
+    let userSettings =  UserSettings.schema.clean({});
+    userSettings = R.merge(userSettings, {
+      user_id: userId,
+    });
+    UserSettings.insert(userSettings);
   }
 });
 
@@ -58,7 +65,7 @@ export const update = new ValidatedMethod({
   validate: userSchema
     .pick([
       '_id',
-     // 'password',
+      // 'password',
       'viewEnvs',
       'viewEnvs.$',
       'editEnvs',
diff --git a/ui/imports/api/connection-tests/connection-tests.js b/ui/imports/api/connection-tests/connection-tests.js
new file mode 100644 (file)
index 0000000..7acb549
--- /dev/null
@@ -0,0 +1,41 @@
+/////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) and others /
+//                                                                                      /
+// All rights reserved. This program and the accompanying materials                     /
+// are made available under the terms of the Apache License, Version 2.0                /
+// which accompanies this distribution, and is available at                             /
+// http://www.apache.org/licenses/LICENSE-2.0                                           /
+/////////////////////////////////////////////////////////////////////////////////////////
+import { Mongo } from 'meteor/mongo';
+import { SimpleSchema } from 'meteor/aldeed:simple-schema';
+//import * as R from 'ramda';
+
+export const ConnectionTests = new Mongo.Collection('connection_tests', { idGeneration: 'MONGO' });
+
+let simpleSchema = new SimpleSchema({
+  _id: { type: { _str: { type: String, regEx: SimpleSchema.RegEx.Id } } },
+  environment: {
+    type: String,
+  },
+  
+  test_targets: {
+    type: [String],
+  },
+
+  test_configurations: {
+    type: [Object],
+    blackbox: true
+  },
+
+  submit_timestamp: {
+    type: String,
+  },
+
+  status: {
+    type: String,
+    defaultValue: 'request'
+  }
+});
+
+ConnectionTests.schema = simpleSchema;
+ConnectionTests.attachSchema(ConnectionTests.schema);
diff --git a/ui/imports/api/connection-tests/methods.js b/ui/imports/api/connection-tests/methods.js
new file mode 100644 (file)
index 0000000..7badfb1
--- /dev/null
@@ -0,0 +1,52 @@
+/////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) and others /
+//                                                                                      /
+// All rights reserved. This program and the accompanying materials                     /
+// are made available under the terms of the Apache License, Version 2.0                /
+// which accompanies this distribution, and is available at                             /
+// http://www.apache.org/licenses/LICENSE-2.0                                           /
+/////////////////////////////////////////////////////////////////////////////////////////
+
+import * as R from 'ramda';
+import { ValidatedMethod } from 'meteor/mdg:validated-method';
+import { ConnectionTests } from './connection-tests';
+import { getSchemaForGroupName } from '/imports/api/environments/environments';
+
+export const insert = new ValidatedMethod({
+  name: 'connection_tests.insert',
+  validate: ConnectionTests.simpleSchema()
+    .pick([
+      'environment',
+      'test_configurations',
+      'test_configurations.$',
+    ]).validator({ clean: true, filter: false }), 
+  run({
+    environment,
+    test_configurations,
+  }) {
+    let connection_test = ConnectionTests.schema.clean({});
+
+    test_configurations = R.filter((config) => {
+      let validationContext = getSchemaForGroupName(config.name).newContext();
+      try {
+        let result = validationContext.validate(config);
+        return result;
+      } catch (_e) {
+        return false;
+      }
+    }, test_configurations);
+
+    let test_targets = R.map((config) => config.name, test_configurations);
+    let submit_timestamp = moment().format();
+
+    connection_test = R.merge(connection_test, {
+      environment,
+      test_targets,
+      test_configurations,
+      submit_timestamp
+    });
+
+    let insertResult = ConnectionTests.insert(connection_test);
+    return insertResult;
+  },
+});
diff --git a/ui/imports/api/connection-tests/server/publications.js b/ui/imports/api/connection-tests/server/publications.js
new file mode 100644 (file)
index 0000000..4a092b5
--- /dev/null
@@ -0,0 +1,21 @@
+/////////////////////////////////////////////////////////////////////////////////////////
+// Copyright (c) 2017 Koren Lev (Cisco Systems), Yaron Yogev (Cisco Systems) and others /
+//                                                                                      /
+// All rights reserved. This program and the accompanying materials                     /
+// are made available under the terms of the Apache License, Version 2.0                /
+// which accompanies this distribution, and is available at                             /
+// http://www.apache.org/licenses/LICENSE-2.0                                           /
+/////////////////////////////////////////////////////////////////////////////////////////
+import { Meteor } from 'meteor/meteor';
+import * as R from 'ramda';
+import { ConnectionTests } from '../connection-tests.js';
+
+Meteor.publish('connection_tests?_id', function (_id) {
+  console.log('server subscription to: connection_tests?_id');
+  console.log('-_id: ', R.toString(_id));
+
+  let query = {
+    _id: _id,
+  };
+  return ConnectionTests.find(query);
+});
index 2b27f8a..edeaaad 100644 (file)
@@ -51,7 +51,10 @@ export const MonitoringSchema = new SimpleSchema({
     defaultValue: 'sensu'
   }, 
 
-  rabbitmq_pass: { type: String },
+  rabbitmq_pass: { 
+    type: String,
+    defaultValue: 'osdna'
+  },
 
   server_ip: {
     type: String,
@@ -108,7 +111,7 @@ export const MonitoringSchema = new SimpleSchema({
 
   ssh_password: {
     type: String,
-    defaultValue: 'calipso',
+    defaultValue: 'osdna',
     optional: true
   },
 
index 22e49cf..e196a69 100644 (file)
@@ -180,7 +180,7 @@ let simpleSchema = new SimpleSchema({
   },
   distribution: {
     type: String,
-    defaultValue: 'Mirantis-8.0',
+    defaultValue: 'Mirantis',
     custom: function () {
       let that = this;
       let constsDist = Constants.findOne({ name: 'distributions' });
@@ -193,6 +193,20 @@ let simpleSchema = new SimpleSchema({
       }
     },
   },
+  distribution_version: {
+    type: String,
+    custom: function () {
+      let that = this;
+      let constsDist = Constants.findOne({ name: 'distribution_versions' });
+
+      if (R.isNil(constsDist.data)) { return 'notAllowed'; }
+      let dist_versions = constsDist.data;
+
+      if (R.isNil(R.find(R.propEq('value', that.value), dist_versions))) {
+        return 'notAllowed';
+      }
+    },
+  },
   last_scanned: {
     type: String, defaultValue: ''
   },
@@ -219,7 +233,7 @@ let simpleSchema = new SimpleSchema({
 
   mechanism_drivers: {
     type: [String],
-    defaultValue: ['ovs'],
+    defaultValue: ['OVS'],
     minCount: 1,
     custom: function () {
       let that = this;
@@ -325,7 +339,7 @@ SimpleSchema.messages({
 Environments.schema = simpleSchema;
 Environments.attachSchema(Environments.schema);
 
-function getSchemaForGroupName(groupName) {
+export function getSchemaForGroupName(groupName) {
   switch (groupName) {
   case 'mysql':
     return MysqlSchema;
@@ -440,13 +454,14 @@ function extractCalcEnvSupportedRelatedValues(schemaHelper) {
   let dbNode = getDbNode(schemaHelper);
 
   let dist = extractValue('distribution', schemaHelper, dbNode);
+  let dist_version = extractValue('distribution_version', schemaHelper, dbNode);
   let typeDrivers = extractValue('type_drivers', schemaHelper, dbNode);
   let mechDrivers = extractValue('mechanism_drivers', schemaHelper, dbNode);
   let enable_monitoring = extractValue('enable_monitoring', schemaHelper, dbNode);
   let listen = extractValue('listen', schemaHelper, dbNode);
 
-  let isMonitoringSupportedRes = isMonitoringSupported(dist, typeDrivers, mechDrivers);
-  let isListeningSupportedRes = isListeningSupported(dist, typeDrivers, mechDrivers);
+  let isMonitoringSupportedRes = isMonitoringSupported(dist, dist_version, typeDrivers, mechDrivers);
+  let isListeningSupportedRes = isListeningSupported(dist, dist_version, typeDrivers, mechDrivers);
 
   return {
     enable_monitoring,
index 6d5e73e..66d1557 100644 (file)
@@ -28,6 +28,7 @@ export const insert = new ValidatedMethod({
       'configuration', 
       'configuration.$', 
       'distribution', 
+      'distribution_version', 
       'name', 
       'type_drivers',
       'mechanism_drivers',
@@ -40,6 +41,7 @@ export const insert = new ValidatedMethod({
   run({
     configuration,
     distribution,
+    distribution_version,
     name,
     type_drivers,
     mechanism_drivers,
@@ -64,6 +66,7 @@ export const insert = new ValidatedMethod({
     environment = R.merge(environment, {
       configuration,
       distribution,
+      distribution_version,
       name,
       type_drivers,
       mechanism_drivers,
index 54d4565..4d06a33 100644 (file)
@@ -54,7 +54,6 @@ let schema = new SimpleSchema({
   },
   scheduled_timestamp: {
     type: Date,
-    defaultValue: null,
     optional: true,
   }
 });
@@ -81,3 +80,5 @@ export const subsScheduledScansPageAmountSorted = 'scheduled_scans?page&amount&s
 export const subsScheduledScansPageAmountSortedCounter = `${subsScheduledScansPageAmountSorted}!counter`;
 
 export const subsScheduledScansId = 'scheduled_scans?_id';
+
+export const subsScheduledScansEnv = 'scheduled_scans?env';
index 9359c20..f4c5b9b 100644 (file)
@@ -2,10 +2,12 @@ import { Meteor } from 'meteor/meteor';
 import * as R from 'ramda';
 import { Counts } from 'meteor/tmeasday:publish-counts';
 
-import { ScheduledScans,
+import { 
+  ScheduledScans,
   subsScheduledScansPageAmountSorted,
   subsScheduledScansPageAmountSortedCounter,
   subsScheduledScansId,
+  subsScheduledScansEnv,
 } from '../scheduled-scans.js';
 
 Meteor.publish(subsScheduledScansPageAmountSorted, function (
@@ -24,7 +26,7 @@ Meteor.publish(subsScheduledScansPageAmountSorted, function (
   let sortParams = {};
 
   sortParams = R.ifElse(R.isNil, R.always(sortParams), 
-      R.assoc(R.__, sortDirection, sortParams))(sortField);
+    R.assoc(R.__, sortDirection, sortParams))(sortField);
 
   console.log('sort params:', sortParams);
 
@@ -50,3 +52,13 @@ Meteor.publish(subsScheduledScansId, function (_id) {
   let query = { _id: _id };
   return ScheduledScans.find(query); 
 });
+
+Meteor.publish(subsScheduledScansEnv, function (env) {
+  console.log(`server subscription: ${subsScheduledScansEnv}`);
+  console.log('-env: ', env);
+
+  //let that = this;
+
+  let query = { environment: env };
+  return ScheduledScans.find(query); 
+});
index 55c5745..6637b74 100644 (file)
@@ -14,14 +14,21 @@ export const SupportedEnvironments = new Mongo.Collection(
 
 export const subsNameSupportedEnvs = 'supported-environments';
 
-export function isMonitoringSupported(distribution, type_drivers, mechanism_drivers) {
+export function isMonitoringSupported(
+  distribution, 
+  distribution_version, 
+  type_drivers, 
+  mechanism_drivers
+) {
   console.log('isMonitoringSupported');
   console.log(`distribution: ${R.toString(distribution)}`);
+  console.log(`distribution_version: ${R.toString(distribution_version)}`);
   console.log(`type_drivers: ${R.toString(type_drivers)}`);
   console.log(`mechanism_drivers: ${R.toString(mechanism_drivers)}`);
 
   let result = SupportedEnvironments.find({
     'environment.distribution': distribution,
+    'environment.distribution_version': { $in: [ distribution_version ] },
     'environment.type_drivers': type_drivers,
     'environment.mechanism_drivers': { $in: mechanism_drivers },
     'features.monitoring': true
@@ -31,14 +38,21 @@ export function isMonitoringSupported(distribution, type_drivers, mechanism_driv
   return result;
 }
 
-export function isListeningSupported(distribution, type_drivers, mechanism_drivers) {
+export function isListeningSupported(
+  distribution, 
+  distribution_version, 
+  type_drivers, 
+  mechanism_drivers
+) {
   console.log('isListeningSupported');
   console.log(`distribution: ${R.toString(distribution)}`);
+  console.log(`distribution_version: ${R.toString(distribution_version)}`);
   console.log(`type_drivers: ${R.toString(type_drivers)}`);
   console.log(`mechanism_drivers: ${R.toString(mechanism_drivers)}`);
 
   let result = SupportedEnvironments.find({
     'environment.distribution': distribution,
+    'environment.distribution_version': { $in: [ distribution_version ] },
     'environment.type_drivers': type_drivers,
     'environment.mechanism_drivers': { $in: mechanism_drivers },
     'features.listening': true
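Both helpers now also filter on distribution_version with $in, so a matching supported_environments record would look roughly like this (shape inferred from the queries above, values invented; the version field is assumed to hold the list of supported versions):

    {
        'environment': {
            'distribution': 'Mirantis',
            'distribution_version': ['8.0'],
            'type_drivers': 'vxlan',
            'mechanism_drivers': ['OVS'],
        },
        'features': {'monitoring': True, 'listening': True}
    }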
similarity index 77%
rename from ui/imports/api/configurations/methods.js
rename to ui/imports/api/user-settings/methods.js
index 7366e3e..e948c1e 100644 (file)
@@ -7,12 +7,12 @@
 // http://www.apache.org/licenses/LICENSE-2.0                                           /
 /////////////////////////////////////////////////////////////////////////////////////////
 import { ValidatedMethod } from 'meteor/mdg:validated-method';
-import { Configurations } from '/imports/api/configurations/configurations';
+import { UserSettings } from '/imports/api/user-settings/user-settings';
 import * as R from 'ramda';
 
 export const save = new ValidatedMethod({
-  name: 'configurations.save',
-  validate: Configurations.simpleSchema()
+  name: 'user-settings.save',
+  validate: UserSettings.simpleSchema()
     .pick([
       'messages_view_backward_delta'
     ]).validator({ clean: true, filter: false }),
@@ -21,19 +21,19 @@ export const save = new ValidatedMethod({
   }) {
 
     let userId = this.userId;
-    let conf = Configurations.findOne({ user_id: userId });
+    let userSettings = UserSettings.findOne({ user_id: userId });
 
-    if (conf) {
-      Configurations.update({ _id: conf._id}, { $set: {
+    if (userSettings) {
+      UserSettings.update({ _id: userSettings._id}, { $set: {
         messages_view_backward_delta: messages_view_backward_delta
       }});
     } else {
-      let item =  Configurations.schema.clean({});
+      let item =  UserSettings.schema.clean({});
       item = R.merge(item, {
         user_id: userId,
         messages_view_backward_delta: messages_view_backward_delta
       });
-      Configurations.insert(item);
+      UserSettings.insert(item);
     }
   }
 });
@@ -8,14 +8,14 @@
 /////////////////////////////////////////////////////////////////////////////////////////
 import { Meteor } from 'meteor/meteor';
 
-import { Configurations } from '../configurations.js';
+import { UserSettings } from '../user-settings.js';
 
-Meteor.publish('configurations?user', function () {
-  console.log('server subscribtion: configurations?user');
+Meteor.publish('user_settings?user', function () {
+  console.log('server subscription: user_settings?user');
 
   let userId = this.userId;
 
   let query = { user_id: userId };
   console.log('-query: ', query);
-  return Configurations.find(query); 
+  return UserSettings.find(query); 
 });
@@ -10,7 +10,7 @@ import { Mongo } from 'meteor/mongo';
 import { SimpleSchema } from 'meteor/aldeed:simple-schema';
 //import * as R from 'ramda';
 
-export const Configurations = new Mongo.Collection('configurations', { idGeneration: 'MONGO' });
+export const UserSettings = new Mongo.Collection('user_settings', { idGeneration: 'MONGO' });
 
 let schema = {
   _id: { type: { _str: { type: String, regEx: SimpleSchema.RegEx.Id } } },
@@ -25,5 +25,5 @@ let schema = {
 };
 
 let simpleSchema = new SimpleSchema(schema);
-Configurations.schema = simpleSchema;
-Configurations.attachSchema(Configurations.schema);
+UserSettings.schema = simpleSchema;
+UserSettings.attachSchema(UserSettings.schema);
index 19be6ac..d16ce00 100644 (file)
@@ -11,6 +11,18 @@ export let imagesForNodeType = {
     warning: 'ic_dns_black_48dp_2x-orange.png',
     error: 'ic_dns_black_48dp_2x-red.png',
   },
+  'host_pnic': {
+    default: 'ic_device_hub_black_24dp_2x.png',
+    ok: 'ic_device_hub_black_24dp_2x-green.png',
+    warning: 'ic_device_hub_black_24dp_2x-orange.png',
+    error: 'ic_device_hub_black_24dp_2x-red.png',
+  },
+  'switch_pnic': {
+    default: 'ic_device_hub_black_24dp_2x.png',
+    ok: 'ic_device_hub_black_24dp_2x-green.png',
+    warning: 'ic_device_hub_black_24dp_2x-orange.png',
+    error: 'ic_device_hub_black_24dp_2x-red.png',
+  },
   'vconnector': {
     default: 'ic_settings_input_composite_black_48dp_2x.png',
     ok: 'ic_settings_input_composite_black_48dp_2x-green.png',
@@ -48,6 +60,12 @@ export let imagesForNodeType = {
     warning: 'ic_keyboard_return_black_48dp_2x-orange.png',
     error: 'ic_keyboard_return_black_48dp_2x-red.png',
   },
+  'view_group-host': {
+    default: 'ic_tv_black_24dp_2x.png'
+  },
+  'view_group-switch': {
+    default: 'ic_zoom_out_map_black_24dp_2x.png'
+  },
 };
 
 export let defaultNodeTypeImage = {
index 6ac478c..6439511 100644 (file)
@@ -36,4 +36,4 @@ import '/imports/ui/components/messages-list/messages-list';
 import '/imports/ui/components/message/message';
 import '/imports/ui/components/dashboard/dashboard';
 import '/imports/ui/components/new-scanning/new-scanning';
-import '/imports/ui/components/configuration/configuration';
+import '/imports/ui/components/user-settings/user-settings';
index 89023b6..23eaf50 100644 (file)
@@ -30,3 +30,9 @@ Template.registerHelper('countOf', function (name) {
     return Counter.get(name);
   }
 });
+
+
+Template.registerHelper('jsonAsString', function (val) {
+  let str = JSON.stringify(val, null, 4);
+  return str;
+});
index 71d6887..f0be6ce 100644 (file)
@@ -54,7 +54,10 @@ import '../../api/accounts/methods';
 import '../../api/supported_environments/server/publications';
 import '../../api/supported_environments/methods';
 
-import '../../api/configurations/server/publications';
-import '../../api/configurations/methods';
+import '../../api/user-settings/server/publications';
+import '../../api/user-settings/methods';
+
+import '../../api/connection-tests/server/publications';
+import '../../api/connection-tests/methods';
 
 import '../../api/migrations/migrations';
index e59414e..b20ccac 100644 (file)
@@ -74,7 +74,7 @@
                 {{/if }}
 
                 <li class="dropdown-header">
-                  <a href="{{pathFor route='configuration' query=''}}" >Configuration</a>
+                  <a href="{{pathFor route='user-settings' query=''}}" >User Settings</a>
                 </li>
             </ul>
         </div>
index e86f8d8..e379007 100644 (file)
@@ -15,7 +15,7 @@ import { Messages } from '/imports/api/messages/messages';
 import { Roles } from 'meteor/alanning:roles';
 import { ReactiveDict } from 'meteor/reactive-dict';
 
-import { Configurations } from '/imports/api/configurations/configurations';
+import { UserSettings } from '/imports/api/user-settings/user-settings';
 
 import './alarm-icons.html';     
 
@@ -32,9 +32,9 @@ Template.alarmIcons.onCreated(function () {
   });
 
   instance.autorun(function () {
-    instance.subscribe('configurations?user');
-    Configurations.find({user_id: Meteor.userId()}).forEach((conf) => {
-      instance.state.set('msgsViewBackDelta', conf.messages_view_backward_delta); 
+    instance.subscribe('user_settings?user');
+    UserSettings.find({user_id: Meteor.userId()}).forEach((userSettings) => {
+      instance.state.set('msgsViewBackDelta', userSettings.messages_view_backward_delta); 
     });
   });
 
index fb42d70..625f8ee 100644 (file)
@@ -23,7 +23,7 @@ import { Counts } from 'meteor/tmeasday:publish-counts';
 //import { Messages } from '/imports/api/messages/messages';
 import { store } from '/imports/ui/store/store';
 import { setMainAppSelectedEnvironment } from '/imports/ui/actions/main-app.actions';
-import { Configurations } from '/imports/api/configurations/configurations';
+import { UserSettings } from '/imports/api/user-settings/user-settings';
 
 import '/imports/ui/components/messages-info-box/messages-info-box';
 import '/imports/ui/components/environment-box/environment-box';
@@ -62,9 +62,9 @@ Template.Dashboard.onCreated(function () {
   });
 
   instance.autorun(function () {
-    instance.subscribe('configurations?user');
-    Configurations.find({user_id: Meteor.userId()}).forEach((conf) => {
-      instance.state.set('msgsViewBackDelta', conf.messages_view_backward_delta); 
+    instance.subscribe('user_settings?user');
+    UserSettings.find({user_id: Meteor.userId()}).forEach((userSettings) => {
+      instance.state.set('msgsViewBackDelta', userSettings.messages_view_backward_delta); 
     });
   });
 
index f8c7221..27e2780 100644 (file)
         <div class="cl-data"><div class="">{{ node.admin_state_up }}</div></div>
       </div>
     </div>
+
+    <div class="sm-info-json">
+      <div class="cl-label">All Fields</div>
+      <div class="cl-data">{{ jsonAsString node }}</div>
+    </div>
   </div>
 </div>
 </template>
index d51b393..e49ea6e 100644 (file)
@@ -9,6 +9,7 @@
     flex: 1;
     display: flex;
     flex-flow: column nowrap;
+    overflow: auto;
 
     .sm-info-title
       color: #0a9ad7;
           color: black;
           font-weight: bold;
           
+    .sm-info-json
+      >.cl-label
+        color: black;
+        font-weight: bold;
+        
+      >.cl-data
+        white-space: pre;
index 51b7afa..dc22ac4 100644 (file)
@@ -71,7 +71,7 @@
   </div>
 
   <div class="form-group">
-    <div class="col-sm-offset-2 col-sm-10">
+    <div class="col-sm-offset-2">
       <button type="button" 
         class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast"
         >Test connection</button>
index 7d93687..bfe8935 100644 (file)
@@ -39,7 +39,11 @@ Template.EnvAciInfo.events({
   'click .sm-next-button': function () {
     let instance = Template.instance();
     instance.data.onNextRequested(); 
-  }
+  },
+
+  'click .js-test-connection' : function (e, instance) {
+    instance.data.onTestConnection();
+  },
 });
    
 /*  
index 02b923d..e925f9e 100644 (file)
@@ -93,9 +93,9 @@
   </div>
 
   <div class="form-group">
-    <div class="col-sm-offset-2 col-sm-2">
+    <div class="col-sm-offset-2">
       <button type="button" 
-        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast"
+        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast js-test-connection"
         >Test connection</button>
     </div>
 
index 3e1522a..498a403 100644 (file)
@@ -38,7 +38,11 @@ Template.EnvAmqpCredentialsInfo.events({
   'click .sm-next-button': function () {
     let instance = Template.instance();
     instance.data.onNextRequested(); 
-  }
+  },
+
+  'click .js-test-connection' : function (e, instance) {
+    instance.data.onTestConnection();
+  },
 });
    
 /*  
index b8b7d26..f5d5df7 100644 (file)
         </div>
       </div>
 
+      <div class="form-group">
+        <label for="" 
+          class="col-sm-2 control-label"
+          >Distribution Version</label>
+
+        <div class="col-sm-3">
+          {{> SelectModel(createSelectArgs
+                values=model.distribution_version
+                key="distribution_version"
+                disabled=(isFieldDisabled 'distribution_version' disabled)
+                options=distributionVersionOptions
+                showNullOption=true)
+          }}
+        </div>
+
+        <div class="col-sm-4">
+          <p>Enter version of the distribution</p>
+        </div>
+      </div>
+
       <div class="form-group">
         <label for="" 
           class="col-sm-2 control-label"
index d866c0e..a5ed0d6 100644 (file)
@@ -75,6 +75,12 @@ Template.EnvMainInfo.helpers({
     return item.data;
   },
 
+  distributionVersionOptions: function () {
+    let item = Constants.findOne({ name: 'distribution_versions' });
+    if (R.isNil(item)) { return []; }
+    return item.data;
+  },
+
   /* depracated 
   networkOptions: function () {
     let item = Constants.findOne({ name: 'network_plugins' });
@@ -118,6 +124,10 @@ function isDisabledByField(fieldName, actionName) {
   if (R.contains(fieldName, ['name', 'distribution']) && actionName !== 'insert') {
     return true;
   }
+
+  if (R.contains(fieldName, ['distribution_version']) && actionName !== 'insert') {
+    return true;
+  }
   
   return false;
 }
index e24c016..95c52a0 100644 (file)
@@ -93,9 +93,9 @@
   </div>
 
   <div class="form-group">
-    <div class="col-sm-offset-2 col-sm-2">
+    <div class="col-sm-offset-2">
       <button type="button" 
-        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast"
+        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast js-test-connection"
         >Test connection</button>
     </div>
 
index 89b9fba..8aaefe4 100644 (file)
@@ -38,7 +38,11 @@ Template.EnvMasterHostCredentialsInfo.events({
   'click .sm-next-button': function () {
     let instance = Template.instance();
     instance.data.onNextRequested(); 
-  }
+  },
+
+  'click .js-test-connection' : function (e, instance) {
+    instance.data.onTestConnection();
+  },
 });
    
 /*  
index ee0cc04..d114ffa 100644 (file)
     </div>
   </div>
 
+  <div class="form-group">
+    <div class="col-sm-offset-2">
+      <button type="button" 
+        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast js-test-connection">Test connection</button>
+    </div>
+  </div>
+
 </div>
 </template>
index cbe5e47..9e15c45 100644 (file)
@@ -43,6 +43,9 @@ Template.EnvMonitoringInfo.rendered = function() {
  */
 
 Template.EnvMonitoringInfo.events({
+  'click .js-test-connection' : function (e, instance) {
+    instance.data.onTestConnection();
+  },
 });
    
 /*  
index 9b820ba..0d92bcd 100644 (file)
   </div>
 
   <div class="form-group">
-    <div class="col-sm-offset-2 col-sm-10">
+    <div class="col-sm-offset-2">
       <button type="button" 
-        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast"
+        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast js-test-connection"
         >Test connection</button>
     </div>
 
index 296379c..7a32ab9 100644 (file)
@@ -39,7 +39,11 @@ Template.EnvNfvInfo.events({
   'click .sm-next-button': function () {
     let instance = Template.instance();
     instance.data.onNextRequested(); 
-  }
+  },
+
+  'click .js-test-connection' : function (e, instance) {
+    instance.data.onTestConnection();
+  },
 });
    
 /*  
index 13967d5..f679918 100644 (file)
@@ -93,9 +93,9 @@
   </div>
 
   <div class="form-group">
-    <div class="col-sm-offset-2 col-sm-2">
+    <div class="col-sm-offset-2">
       <button type="button" 
-        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast"
+        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast js-test-connection"
         >Test connection</button>
     </div>
 
index 961e5b6..b8b80c0 100644 (file)
@@ -38,7 +38,11 @@ Template.EnvOpenStackDbCredentialsInfo.events({
   'click .sm-next-button': function () {
     let instance = Template.instance();
     instance.data.onNextRequested(); 
-  }
+  },
+
+  'click .js-test-connection' : function (e, instance) {
+    instance.data.onTestConnection();
+  },
 });
    
 /*  
index 3f35b9a..26594e4 100644 (file)
   </div>
 
   <div class="form-group">
-    <div class="col-sm-offset-2 col-sm-2">
+    <div class="col-sm-offset-2">
       <button type="button" 
-        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored toast">Test connection</button>
+        class="mdl-button mdl-js-button mdl-button--raised mdl-js-ripple-effect mdl-button--colored js-test-connection toast js-test-connection">Test connection</button>
     </div>
 
     <div class="col-sm-offset-2 col-sm-10 btn-mgt-5">
index 0f503f1..0bc640b 100644 (file)
@@ -38,7 +38,11 @@ Template.EnvOsApiEndpointInfo.events({
   'click .sm-next-button': function () {
     let instance = Template.instance();
     instance.data.onNextRequested(); 
-  }
+  },
+
+  'click .js-test-connection' : function (e, instance) {
+    instance.data.onTestConnection();
+  },
 });
    
 /*  
index 4fb6366..da0d631 100644 (file)
@@ -26,7 +26,7 @@ import { calcIconForMessageLevel, lastMessageTimestamp, calcColorClassForMessage
 import { Counts } from 'meteor/tmeasday:publish-counts';
 import { Roles } from 'meteor/alanning:roles';
 //import { idToStr } from '/imports/lib/utilities';
-import { Configurations } from '/imports/api/configurations/configurations';
+import { UserSettings } from '/imports/api/user-settings/user-settings';
 import { Counter } from 'meteor/natestrauser:publish-performant-counts';
         
 import '/imports/ui/components/data-cubic/data-cubic';
@@ -167,9 +167,9 @@ Template.EnvironmentDashboard.onCreated(function() {
   });
 
   instance.autorun(function () {
-    instance.subscribe('configurations?user');
-    Configurations.find({user_id: Meteor.userId()}).forEach((conf) => {
-      instance.state.set('msgsViewBackDelta', conf.messages_view_backward_delta); 
+    instance.subscribe('user_settings?user');
+    UserSettings.find({user_id: Meteor.userId()}).forEach((userSettings) => {
+      instance.state.set('msgsViewBackDelta', userSettings.messages_view_backward_delta); 
     });
   });
 
index ddc97cf..bc6cc55 100644 (file)
@@ -9,11 +9,12 @@
 /*
  */
 
-import { Meteor } from 'meteor/meteor';
+//import { Meteor } from 'meteor/meteor';
 import { Session } from 'meteor/session';
 import { Template } from 'meteor/templating';
 import { ReactiveDict } from 'meteor/reactive-dict';
 import * as R from 'ramda';
+import { ConnectionTests } from '/imports/api/connection-tests/connection-tests';
 
 import { Environments } from '/imports/api/environments/environments';
 import { subsNameSupportedEnvs, 
@@ -39,6 +40,8 @@ import {
   update
 } from '/imports/api/environments/methods';
 
+import { insert as insertConnectionTests } from '/imports/api/connection-tests/methods';
+
 /*
  * Lifecycles
  */
@@ -54,6 +57,7 @@ Template.EnvironmentWizard.onCreated(function(){
     isMessage: false,
     message: null,
     disabled: false,
+    connectionTestId: null
   });
 
   instance.autorun(function () {
@@ -77,19 +81,40 @@ Template.EnvironmentWizard.onCreated(function(){
     let action = instance.state.get('action');
     if (action === 'update') {
       Environments.find({'name': envName})
-      .forEach(function (envItem) {
-        instance.state.set('environmentModel', R.clone(envItem));
-      });
+        .forEach(function (envItem) {
+          instance.state.set('environmentModel', R.clone(envItem));
+        });
     } else if (action === 'insert') {
       instance.state.set('environmentModel', generateNewEnv());
     }
   });
 
+  instance.autorun(function () {
+    let connectionTestId = instance.state.get('connectionTestId');
+    if (R.isNil(connectionTestId)) { return; }
+    
+    instance.subscribe('connection_tests?_id', connectionTestId);
+    ConnectionTests.find({ _id: connectionTestId }).forEach((connTest) => {
+      if (connTest.status !== 'response') { 
+        return; 
+      }
+
+      R.mapObjIndexed((success, groupName) => {
+        if (success) {
+          toastr.success(`${groupName} connection is OK`, { timeOut: 5000 });
+        } else {
+          toastr.error(`${groupName} connection is DOWN`, { timeOut: 5000 });
+        }
+      }, connTest.test_results);
+    });
+  });
+
   instance.storeUnsubscribe = store.subscribe(() => {
     let i18n = store.getState().api.i18n;
     instance.state.set('i18n', i18n);
   });
 
+
   let i18n = store.getState().api.i18n;
   instance.state.set('i18n', i18n);
 });
@@ -135,6 +160,7 @@ Template.EnvironmentWizard.helpers({
 
     let isMonSupportedRes = isMonitoringSupported(
       environmentModel.distribution, 
+      environmentModel.distribution_version, 
       environmentModel.type_drivers,
       environmentModel.mechanism_drivers
     );
@@ -149,6 +175,7 @@ Template.EnvironmentWizard.helpers({
     
     let isListeningSupportedRes = isListeningSupported(
       environmentModel.distribution, 
+      environmentModel.distribution_version, 
       environmentModel.type_drivers,
       environmentModel.mechanism_drivers
     );
@@ -171,6 +198,28 @@ Template.EnvironmentWizard.helpers({
         isMonitoringDisabled: isMonitoringDisabled,
         setModel: function (newModel) {
           Session.set('isDirty', true);
+
+          if (newModel.aci_enabled) {
+            let monitoringGroup = getGroupInArray('Monitoring', newModel.configuration);
+            newModel = setConfigurationGroup('Monitoring', monitoringGroup, newModel);
+          } else {
+            newModel = removeConfigurationGroup('Monitoring', newModel);
+          }
+
+          if (newModel.enable_monitoring) {
+            let monitoringGroup = getGroupInArray('ACI', newModel.configuration);
+            newModel = setConfigurationGroup('ACI', monitoringGroup, newModel);
+          } else {
+            newModel = removeConfigurationGroup('ACI', newModel);
+          }
+
+          if (newModel.listen) {
+            let monitoringGroup = getGroupInArray('AMQP', newModel.configuration);
+            newModel = setConfigurationGroup('AMQP', monitoringGroup, newModel);
+          } else {
+            newModel = removeConfigurationGroup('AMQP', newModel);
+          }
+
           instance.state.set('environmentModel', newModel);
         },
         onNextRequested: activateNextTab.bind(null, 'endpoint-panel'),
@@ -192,6 +241,9 @@ Template.EnvironmentWizard.helpers({
         },
         onNextRequested: activateNextTab.bind(null, 'db-credentials'),
         action: action,
+        onTestConnection: function () {
+          testConnection(instance);
+        },
       }
     }, {
       label: 'OS DB Credentials',
@@ -209,6 +261,9 @@ Template.EnvironmentWizard.helpers({
         },
         onNextRequested: activateNextTab.bind(null, 'master-host'),
         action: action,
+        onTestConnection: function () {
+          testConnection(instance);
+        },
       }
     }, {
       label: 'Master Host Credentials',
@@ -226,6 +281,9 @@ Template.EnvironmentWizard.helpers({
         },
         onNextRequested: activateNextTab.bind(null, 'amqp'),
         action: action,
+        onTestConnection: function () {
+          testConnection(instance);
+        },
       }
     }, {
       label: 'AMQP Credentials',
@@ -243,6 +301,9 @@ Template.EnvironmentWizard.helpers({
         },
         onNextRequested: activateNextTab.bind(null, 'aci'),
         action: action,
+        onTestConnection: function () {
+          testConnection(instance);
+        },
       }
     }, 
     /*  {
@@ -279,6 +340,9 @@ Template.EnvironmentWizard.helpers({
         },
         onNextRequested: activateNextTab.bind(null, 'monitoringInfo'),
         action: action,
+        onTestConnection: function () {
+          testConnection(instance);
+        },
       }
     }, {
       label: 'Monitoring',
@@ -296,6 +360,9 @@ Template.EnvironmentWizard.helpers({
           instance.state.set('environmentModel', newModel);
         },
         action: action,
+        onTestConnection: function () {
+          testConnection(instance);
+        },
       }
     }];
   },
@@ -327,14 +394,18 @@ Template.EnvironmentWizard.helpers({
  */
 
 Template.EnvironmentWizard.events({
+  /*
   'click .toast' : function () {
     toastr.success('Have fun storming the castle!', 'Open Stack server says');
   },
+  */
 
   // todo: research: seems not implemented
+  /*
   'click .fa-trash' : function () {
     Meteor.call('deleteRecipe', this._id);
   },
+  */
 
   'click .sm-submit-button': function () {
     let instance = Template.instance();
@@ -391,6 +462,36 @@ function processActionResult(instance, error) {
   }
 }
 
+function processInsertTestConnectionResult(instance, error, itemId) {
+  if (error) {
+    instance.state.set('isError', true);
+    instance.state.set('isSuccess', false);
+    instance.state.set('isMessage', true);  
+
+    if (typeof error === 'string') {
+      instance.state.set('message', error);
+    } else {
+      let message = error.message;
+      if (error.errors) {
+        message = R.reduce((acc, errorItem) => {
+          return acc + '\n- ' + errorItem.name;
+        }, message, error.errors);
+      }
+      instance.state.set('message', message);
+    }
+
+    return;
+  } 
+
+  instance.state.set('connectionTestId', itemId);
+
+  instance.state.set('isError', false);
+  instance.state.set('isSuccess', true);
+  instance.state.set('isMessage', true);  
+
+  instance.state.set('message', 'Connection sent to be tested');
+}
+
 function getGroupInArray(groupName, array) {
   let group = R.find(R.propEq('name', groupName), array);
   return group ? group : createNewConfGroup(groupName);
@@ -407,6 +508,12 @@ function setConfigurationGroup(groupName, group, model) {
   return newModel;
 }
 
+function removeConfigurationGroup(groupName, model) {
+  let newConfiguration = removeGroupInArray(groupName, model.configuration);
+  let newModel = R.assoc('configuration', newConfiguration, model);
+  return newModel;
+}
+
 function doSubmit(instance) {
   let action = instance.state.get('action');
   let environment = instance.state.get(
@@ -422,6 +529,7 @@ function doSubmit(instance) {
     insert.call({
       configuration: environment.configuration,
       distribution: environment.distribution,
+      distribution_version: environment.distribution_version,
       name: environment.name,
       type_drivers: environment.type_drivers,
       mechanism_drivers: environment.mechanism_drivers,
@@ -450,3 +558,11 @@ function doSubmit(instance) {
     break;
   }
 }
+
+function testConnection(instance) {
+  let environmentModel = instance.state.get('environmentModel');
+  insertConnectionTests.call({
+    environment: environmentModel.name,
+    test_configurations: environmentModel.configuration,
+  }, processInsertTestConnectionResult.bind(null, instance));
+}
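
The error branch of processInsertTestConnectionResult above folds a Meteor-style validation error into a single display string with R.reduce. A minimal standalone sketch of that folding, assuming an error shaped like Meteor's ValidationError ({ message, errors: [{ name }] }); the function name here is illustrative only:

import * as R from 'ramda';

function buildErrorMessage(error) {
  // Plain strings are passed through unchanged, mirroring the callback above.
  if (typeof error === 'string') { return error; }
  let message = error.message;
  if (error.errors) {
    // Append one '- <field name>' line per failing field.
    message = R.reduce((acc, errorItem) => acc + '\n- ' + errorItem.name,
      message, error.errors);
  }
  return message;
}

// buildErrorMessage({ message: 'Validation failed',
//                     errors: [{ name: 'host' }, { name: 'port' }] })
// => 'Validation failed\n- host\n- port'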
index 96bc48c..e12921b 100644 (file)
 
         <div class="sm-main-content-segment">
 
-          {{#if selectedNodeType }}
-            {{#if (getShow 'dashboard') }}
-              <div class="sm-dashboard">
-                {{> UI.dynamic template=dashboardTemplate 
-                               data=(argsDashboard rdxSelectedNodeId) }}
-              </div>
-            {{/if }}
-            {{#if (getShow 'graph') }}
-              {{#if isSelectedNodeAGraph }}
+          {{#if isLoading }}
+
+            <div class="sm-loading-message"></div>
 
-                <!--div class="sm-graph">
-                  > d3graph argsD3Graph
-                </div-->
+          {{else }}
 
-                <div class="sm-network-graph">
-                  {{> NetworkGraphManager argsNetworkGraphManager }}
+            {{#if selectedNodeType }}
+              {{#if (getShow 'dashboard') }}
+                <div class="sm-dashboard">
+                  {{> UI.dynamic template=dashboardTemplate 
+                                 data=(argsDashboard rdxSelectedNodeId) }}
                 </div>
+              {{/if }}
+              {{#if (getShow 'graph') }}
+                {{#if isSelectedNodeAGraph }}
 
-                {{> GraphTooltipWindow (argsGraphTooltipWindow graphTooltipWindow) }}
+                  <!--div class="sm-graph">
+                    > d3graph argsD3Graph
+                  </div-->
 
-                {{#if showVedgeInfoWindow }}
-                  {{> VedgeInfoWindow (argsVedgeInfoWindow vedgeInfoWindow) }}
-                {{/if }}
-              
-              {{else }}
+                  <div class="sm-network-graph">
+                    {{> NetworkGraphManager argsNetworkGraphManager }}
+                  </div>
 
-              <div class="sm-node-no-graph-data-msg">{{ rPath i18n 'components.environment.noGraphForLeafMsg' }}</div>
+                  {{> GraphTooltipWindow (argsGraphTooltipWindow graphTooltipWindow) }}
 
+                  {{#if showVedgeInfoWindow }}
+                    {{> VedgeInfoWindow (argsVedgeInfoWindow vedgeInfoWindow) }}
+                  {{/if }}
+                
+                {{else }}
+
+                <div class="sm-node-no-graph-data-msg">{{ rPath i18n 'components.environment.noGraphForLeafMsg' }}</div>
+
+                {{/if }}
               {{/if }}
             {{/if }}
-          {{/if }}
+          {{/if }} <!-- if isLoading -->
         </div>
 
     </div>
index 1f0e723..9df6046 100644 (file)
@@ -96,6 +96,7 @@ Template.Environment.onCreated(function () {
     vedgeInfoWindow: { node: null, left: 0, top: 0, show: false },
     dashboardName: 'environment',
     collapsedSideMenu: instance.collapsedSideMenu,
+    isLoading: false,
   });
 
   instance.currentData = new ReactiveVar(null, EJSON.equals);
@@ -226,27 +227,44 @@ Template.Environment.onCreated(function () {
     
   });
 
-    /*
-    (() => {
-      if (R.isNil(controller.params.query.selectedNodeId) &&
-          R.isNil(selectedNodeId)) {
-        return;
-      }
+  /*
+  (() => {
+    if (R.isNil(controller.params.query.selectedNodeId) &&
+        R.isNil(selectedNodeId)) {
+      return;
+    }
 
-      let srlSelectedNodeId = idToStr(selectedNodeId);
-      if (R.equals(controller.params.query.selectedNodeId, srlSelectedNodeId)) {
-        return;
-      }
+    let srlSelectedNodeId = idToStr(selectedNodeId);
+    if (R.equals(controller.params.query.selectedNodeId, srlSelectedNodeId)) {
+      return;
+    }
 
-      setTimeout(() => {
-        Router.go('environment', 
-          { _id: controller.params._id }, 
-          { query: { selectedNodeId: srlSelectedNodeId } });
-      }, 1);
+    setTimeout(() => {
+      Router.go('environment', 
+        { _id: controller.params._id }, 
+        { query: { selectedNodeId: srlSelectedNodeId } });
+    }, 1);
+
+  })();
+  */
 
-    })();
-    */
+  let prevIdPath = null;
+  instance.autorun(function () {
+    let idPath = instance.rdxSelectedNodeIdPath.get();
+    if (prevIdPath !== idPath) {
+      prevIdPath = idPath;
+      instance.state.set('isLoading', true);
+    }
+  });
 
+  instance.autorun(function () {
+    let isLoading = instance.state.get('isLoading');
+    if (isLoading) {
+      setTimeout(() => {
+        instance.state.set('isLoading', false);
+      }, 200);
+    }
+  });
 });
 
 Template.Environment.onDestroyed(function () {
@@ -294,6 +312,11 @@ Template.Environment.helpers({
     return instance.state.get(key);
   },
 
+  isLoading: function () {
+    let instance = Template.instance();
+    return instance.state.get('isLoading');
+  },
+
   argsNavMenu: function (envName, mainNode) {
     let instance = Template.instance();
     return {
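
The two autoruns added to Template.Environment.onCreated implement a simple loading flag: the first raises isLoading whenever the selected node id path changes, the second clears it 200 ms later so the template can show the loading message while the graph data re-subscribes. A minimal sketch of the same pattern outside a template, assuming only Meteor's Tracker, ReactiveDict and ReactiveVar packages; selectedIdPath stands in for rdxSelectedNodeIdPath:

import { Tracker } from 'meteor/tracker';
import { ReactiveDict } from 'meteor/reactive-dict';
import { ReactiveVar } from 'meteor/reactive-var';

const state = new ReactiveDict();
state.setDefault({ isLoading: false });
const selectedIdPath = new ReactiveVar(null);

let prevIdPath = null;
Tracker.autorun(() => {
  const idPath = selectedIdPath.get();
  if (prevIdPath !== idPath) {
    prevIdPath = idPath;
    state.set('isLoading', true);   // raise the flag on every path change
  }
});

Tracker.autorun(() => {
  if (state.get('isLoading')) {
    // 200 ms matches the delay used in the patch; it is a UX debounce, not a data guard.
    setTimeout(() => state.set('isLoading', false), 200);
  }
});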
index f383877..5a3b309 100644 (file)
@@ -82,7 +82,7 @@ Template.GeneralFolderNodeDashboard.helpers({
   argsMainCubic: function (childrenCount) {
     return {
       header: R.path(['components', 'generalFolderNodeDashboard', 'mainCubic', 'header']
-        )(store.getState().api.i18n),
+      )(store.getState().api.i18n),
       dataInfo: R.toString(childrenCount), 
       icon: new Icon({ type: 'fa', name: 'desktop' }),
     };
index dbeb55c..371a983 100644 (file)
@@ -25,6 +25,9 @@ import './graph-tooltip-window.html';
   
 Template.GraphTooltipWindow.onCreated(function() {
   let instance = this;
+  instance.simpleState = {
+    gotIn: false,
+  };
 
   instance.autorun(() => {
     new SimpleSchema({
@@ -47,14 +50,19 @@ Template.GraphTooltipWindow.rendered = function() {
  */
 
 Template.GraphTooltipWindow.events({
-  'mouseout .os-graph-tooltip-window': function(_e, _instance) {
-    /*
+  'mouseenter .os-graph-tooltip-window': function(e, instance) {
+    instance.simpleState.gotIn = true;
+  },
+
+  'mouseleave .os-graph-tooltip-window': function(e, instance) {
     if (!instance.data.show) { return; }
 
-    e.preventDefault();
-    e.stopPropagation();
-    store.dispatch(closeGraphTooltipWindow());
-    */
+    //e.preventDefault();
+    //e.stopPropagation();
+    if (instance.simpleState.gotIn) {
+      instance.simpleState.gotIn = false;
+      store.dispatch(closeGraphTooltipWindow());
+    }
   },
 
   'click .os-graph-tooltip-window': function(e, instance) {
index 877d68b..11485fd 100644 (file)
@@ -56,4 +56,4 @@
 @import 'network-graph-manager/*';
 @import 'network-graph/*';
 @import 'environment-box/*';
-@import 'configuration/*';
+@import 'user-settings/*';
index d720be1..3508372 100644 (file)
@@ -70,7 +70,7 @@
           {{ getAttrDisabled }}
           class="sm-input-message cl-input"
           rows="10"
-          >{{ asString (getModelField 'message') }}</textarea>
+          >{{ jsonAsString (getModelField 'message') }}</textarea>
         <div class="cl-field-desc">Message</div>
       </div>
 
index 41ea53d..10ff830 100644 (file)
@@ -151,7 +151,7 @@ Template.Message.helpers({
 
   sourceSystemsList: function () {
     return R.ifElse(R.isNil, R.always([]), R.prop('data')
-      )(Constants.findOne({ name: 'message_source_systems' }));
+    )(Constants.findOne({ name: 'message_source_systems' }));
   },
 
   getAttrDisabled: function () {
@@ -204,11 +204,6 @@ Template.Message.helpers({
     return calcActionLabel(action);
   },
 
-  asString: function (val) {
-    let str = JSON.stringify(val, null, 4);
-    return str;
-  },
-
   argsInvPropDisplay: function (env, nodeId) {
     return {
       env: env,
index ed556c0..5cd4f35 100644 (file)
@@ -14,6 +14,7 @@ import { store } from '/imports/ui/store/store';
 import { activateGraphTooltipWindow } from '/imports/ui/actions/graph-tooltip-window.actions';
 import { closeGraphTooltipWindow } from '/imports/ui/actions/graph-tooltip-window.actions';
 //import { activateVedgeInfoWindow } from '/imports/ui/actions/vedge-info-window.actions';
+import { EJSON } from 'meteor/ejson';
         
 import '/imports/ui/components/network-graph/network-graph';
 
@@ -31,7 +32,14 @@ Template.NetworkGraphManager.onCreated(function() {
     id_path: null,
     graphDataChanged: null,
     isReady: false,
+    inventoriesToFind: [],
+    cliquesToFind: [],
+    linksToFind: [],
+    nodesToFind: [],
+    graphLinks: [],
+    graphNodes: [],
   });
+
   instance.simpleState = {
     graphData: {
       links: [],
@@ -55,10 +63,116 @@ Template.NetworkGraphManager.onCreated(function() {
     let id_path = instance.state.get('id_path');
 
     instance.simpleState.graphData = generateGraphData();
+    instance.state.set('graphDataChanged', null);
     instance.state.set('isReady', false);
+    instance.state.set('inventoriesToFind', []);
+    instance.state.set('cliquesToFind', []);
+    instance.state.set('linksToFind', []);
+    instance.state.set('nodesToFind', []);
+    instance.state.set('graphLinks', []);
+    instance.state.set('graphNodes', []);
+
+    //instance.subscribe('attributes_for_hover_on_data');
+    //subscribeToNodeAndRelatedData(id_path, instance, instance.simpleState);
+    instance.state.set('inventoriesToFind', [id_path]);
+  });
+
+  instance.autorun(function () {
+    let inventories = instance.state.get('inventoriesToFind');
+    if (inventories.length <= 0) { 
+      return; 
+    }
+
+    instance.subscribe('inventory?id_path', inventories[0]);
+
+    // id_path: assumption - unique
+    Inventory.find({ id_path: inventories[0] }).forEach((inventory) => {
+      if (! inventory.clique) {
+        return;
+      }
+
+      instance.state.set('cliquesToFind', [inventory._id]);
+    });
+  });
+
+  instance.autorun(function () {
+    let cliques = instance.state.get('cliquesToFind');
+    if (cliques.length <= 0) { 
+      return; 
+    }
 
-    instance.subscribe('attributes_for_hover_on_data');
-    subscribeToNodeAndRelatedData(id_path, instance, instance.simpleState);
+    // focal point: assumption - unique per inventory node.
+    let mainNodeIdStr = cliques[0]._str;
+    instance.subscribe('cliques?focal_point', mainNodeIdStr);
+
+    Cliques.find({ focal_point: new Mongo.ObjectID(mainNodeIdStr) }).forEach( function (cliqueItem) {
+      instance.state.set('linksToFind', cliqueItem.links);
+    });
+  });
+
+  instance.autorun(function () {
+    let linksToFind = instance.state.get('linksToFind');
+    if (linksToFind.length <= 0) { 
+      return; 
+    }
+
+    // Find links for focal point.
+    instance.subscribe('links?_id-in', linksToFind);
+
+    Links.find({ _id: {$in: linksToFind} }).forEach(function(link) {
+      let graphLinks = EJSON.parse(instance.state.keys['graphLinks']);
+      graphLinks = R.concat([link], graphLinks);
+      instance.state.set('graphLinks', graphLinks);
+    });
+  });
+
+  instance.autorun(function () {
+    let graphLinks = instance.state.get('graphLinks');
+    if (graphLinks.length <= 0) { 
+      return; 
+    }
+
+    instance.simpleState.graphData = addLinksToGraph(graphLinks, instance.simpleState.graphData);
+    instance.state.set('graphDataChanged', Date.now());
+
+    // Find nodes for the links
+    // todo: remove duplicates.
+    let nodesIds = R.chain(link => {
+      return [ link['source'], link['target'] ]; 
+    }, graphLinks); 
+
+    let nodesToFind = EJSON.parse(instance.state.keys['nodesToFind']);
+    nodesToFind = R.concat(nodesIds, nodesToFind);
+    instance.state.set('nodesToFind', nodesToFind);
+  });
+
+  instance.autorun(function () {
+    let nodesToFind = instance.state.get('nodesToFind');
+    if (nodesToFind.length <= 0) { 
+      return; 
+    }
+
+    instance.subscribe('inventory?_id-in', nodesToFind);
+
+    Inventory.find({ _id: { $in: nodesToFind } }).forEach(function (node) {
+      let graphNodes = EJSON.parse(instance.state.keys['graphNodes']);
+      graphNodes = R.concat([node], graphNodes);
+      instance.state.set('graphNodes', graphNodes);
+    });
+
+  });
+
+  instance.autorun(function () {
+    let graphNodes = instance.state.get('graphNodes');
+    if (graphNodes.length <= 0) { 
+      return; 
+    }
+
+    instance.simpleState.graphData = addNodesToGraph(graphNodes, instance.simpleState.graphData);
+
+    let isReady = calcIsReady(instance.simpleState.graphData);
+    instance.state.set('graphDataChanged', Date.now());
+    instance.state.set('isReady', isReady);
   });
 });  
 
@@ -111,7 +225,7 @@ Template.NetworkGraphManager.helpers({
             }
 
             store.dispatch(
-              activateGraphTooltipWindow(res.nodeName, res.attributes, x - 30, y - 10));
+              activateGraphTooltipWindow(res.nodeName, res.attributes, x + 30, y - 10));
           });
       },
       onNodeOut: function (_nodeId) {
@@ -127,8 +241,8 @@ Template.NetworkGraphManager.helpers({
         isDragging = false;
       },
       onGroupOver: function () {
-        instance.simpleState.itemOfInterest = null;
-        store.dispatch(closeGraphTooltipWindow());
+        //instance.simpleState.itemOfInterest = null;
+        //store.dispatch(closeGraphTooltipWindow());
       },
       onLinkOver: function (linkId, x, y) {
         if (isDragging) {
@@ -162,46 +276,6 @@ Template.NetworkGraphManager.helpers({
   }
 }); // end: helpers
 
-function subscribeToNodeAndRelatedData(id_path, instance, simpleState) {
-  instance.subscribe('inventory?id_path', id_path);
-
-  // id_path: assumption - unique
-  Inventory.find({ id_path: id_path }).forEach((inventory) => {
-    if (! inventory.clique) {
-      return;
-    }
-
-    // focal point: assumption - unique per inventory node.
-    let mainNodeIdStr = inventory._id._str;
-    instance.subscribe('cliques?focal_point', mainNodeIdStr);
-
-    Cliques.find({ focal_point: new Mongo.ObjectID(mainNodeIdStr) }).forEach( function (cliqueItem) {
-
-      // Find links for focal point.
-      instance.subscribe('links?_id-in', cliqueItem.links);
-
-      Links.find({ _id: {$in: cliqueItem.links} }).forEach(function(link) {
-        simpleState.graphData = addLinkToGraph(link, simpleState.graphData);
-        instance.state.set('graphDataChanged', Date.now());
-
-        // Find nodes for link
-        let nodesIds = [ link['source'], link['target'] ];
-        instance.subscribe('inventory?_id-in', nodesIds);
-
-        Inventory.find({ _id: { $in: nodesIds } }).forEach(function (node) {
-          simpleState.graphData = addNodeToGraph(node, simpleState.graphData);
-          let isReady = calcIsReady(simpleState.graphData);
-          instance.state.set('graphDataChanged', Date.now());
-          instance.state.set('isReady', isReady);
-
-          // Find nodes attributes for links nodes.
-          instance.subscribe('attributes_for_hover_on_data?type', node.type);
-        });
-      });
-    });
-  });
-}
-
 function generateGraphData() {
   return {
     nodes: [],
@@ -210,7 +284,7 @@ function generateGraphData() {
   };
 }
 
-function addLinkToGraph(link, graphData) {
+function genGraphLink(link) {
   let newLink = {
     sourceId: link.source, 
     targetId: link.target, 
@@ -222,6 +296,24 @@ function addLinkToGraph(link, graphData) {
     }
   };
 
+  return newLink;
+}
+
+function addLinksToGraph(linksInfo, graphData) {
+  let newLinks = R.map(link => genGraphLink(link), linksInfo);
+
+  let links = R.unionWith(R.eqBy(R.prop('_osid')), graphData.links, newLinks);
+  links = expandLinks(links, graphData.nodes);
+
+  return R.merge(graphData, {
+    links: links
+  });
+}
+
+/*
+function addLinkToGraph(link, graphData) {
+  let newLink = genGraphLink(link);
+
   let links = R.unionWith(R.eqBy(R.prop('_osid')), graphData.links, [newLink]);
   links = expandLinks(links, graphData.nodes);
 
@@ -229,6 +321,7 @@ function addLinkToGraph(link, graphData) {
     links: links
   });
 }
+*/
 
 function expandLinks(links, nodes) {
   return R.map((link) => {
@@ -248,7 +341,7 @@ function expandLinks(links, nodes) {
   }, links);
 }
 
-function addNodeToGraph(node, graphData) {
+function genGraphNode(node) {
   let newNode = {
     _osid: node._id,
     _osmeta: {
@@ -268,8 +361,30 @@ function addNodeToGraph(node, graphData) {
   })(groupMarkers);
   if (groupKey) {
     newNode = R.assocPath(['_osmeta', 'groupId'], node[groupKey], newNode);
+    newNode = R.assocPath(['_osmeta', 'groupType'], groupKey, newNode);
   }
 
+  return newNode;
+}
+
+function addNodesToGraph(nodesInfo, graphData) {
+  let newNodes = R.map((node) => genGraphNode(node), nodesInfo);
+
+  let nodes = R.unionWith(R.eqBy(R.prop('_osid')), graphData.nodes, newNodes);
+  let links = expandLinks(graphData.links, nodes);
+  let groups = calcGroups(nodes);
+
+  return R.merge(graphData, {
+    nodes: nodes,
+    links: links,
+    groups: groups,
+  });
+}
+
+/*
+function addNodeToGraph(node, graphData) {
+  let newNode = genGraphNode(node);
+
   let nodes = R.unionWith(R.eqBy(R.prop('_osid')), graphData.nodes, [newNode]);
   let links = expandLinks(graphData.links, nodes);
   let groups = calcGroups(nodes);
@@ -280,6 +395,7 @@ function addNodeToGraph(node, graphData) {
     groups: groups,
   });
 }
+*/
 
 function calcIsReady(graphData) {
   return R.all((link) => {
@@ -302,6 +418,7 @@ function calcGroups(nodes) {
         leaves: [node],
         isExpanded: true,
         name: groupId,
+        type: node._osmeta.groupType,
       };
       accGroups = R.append(group, accGroups);
 
index 68b3a57..04b69e3 100644 (file)
@@ -27,6 +27,7 @@ Template.NetworkGraph.onCreated(function() {
   instance.simpleState = {
     graphData: null
   };
+  instance.prevForce = null;
 
   instance.autorun(function () {
     let data = Template.currentData();
@@ -73,11 +74,27 @@ Template.NetworkGraph.rendered = function() {
       instance.onDragStart,
       instance.onDragEnd,
       instance.onGroupOver,
-      instance.onLinkOver
+      instance.onLinkOver,
+      function onNewForce(newForce) {
+        if (instance.prevForce) {
+          instance.prevForce.stop();
+        }
+        instance.prevForce = newForce;
+      }
     );
   });
 };  
 
+Template.NetworkGraph.onDestroyed(function () {
+  let instance = Template.instance();
+  let graphEl = instance.$('.sm-graph')[0];
+  let svg = d3.select(graphEl).select('svg');
+  if (instance.prevForce) {
+    instance.prevForce.stop();
+  }
+  
+  svg.remove();
+});
 /*
  * Events
  */
@@ -138,10 +155,13 @@ function renderGraph(
   onDragStart,
   onDragEnd,
   onGroupOver,
-  onLinkOver
+  onLinkOver,
+  onNewForce
 ) {
 
   let force = genForceCola(cola, d3, w, h);
+  onNewForce(force);
+
   let drag = force.drag()
     .on('start', function (_d) {
       onDragStart();
@@ -295,7 +315,8 @@ function genSvgNodes(g, nodes, drag, onNodeOver, onNodeOut, onNodeClick, onGroup
       onNodeOut(d._osmeta.nodeId);
     })
     .on('click', function (d) {
-      if (R.path(['_osmeta', 'type'], d) === 'view_group') {
+      let type = R.defaultTo('', R.path(['_osmeta', 'type'], d));
+      if (R.contains(type, ['view_group-host', 'view_group-switch'])) {
         onGroupNodeClick(d._osmeta.nodeId);
       }
       onNodeClick(d._osmeta.nodeId);
@@ -624,7 +645,7 @@ function calcClosedGroupsNodes(rejectedGroups, prevViewNodes) {
     return R.append({
       _osid: nodeId,
       _osmeta: {
-        type: 'view_group',
+        type: `view_group-${group.type}`,
         nodeId: group._osid,
       },
       width: 60,
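
renderGraph above now reports each newly created force layout through an onNewForce callback so the template can stop the previous layout before starting another, and stop the last one in onDestroyed. A small sketch of that ownership pattern, assuming only a layout object that exposes stop() (as the diff's prevForce.stop() calls imply):

function makeLayoutOwner() {
  let prevLayout = null;
  return {
    // Pass this as renderGraph's onNewForce argument.
    onNewLayout(newLayout) {
      if (prevLayout) { prevLayout.stop(); }   // halt the superseded simulation
      prevLayout = newLayout;
    },
    // Call from the template's onDestroyed hook.
    dispose() {
      if (prevLayout) { prevLayout.stop(); }
      prevLayout = null;
    },
  };
}

// Usage sketch:
// const owner = makeLayoutOwner();
// renderGraph(..., owner.onNewLayout);
// Template.NetworkGraph.onDestroyed(() => owner.dispose());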
index 891d2b3..27b72c2 100644 (file)
@@ -2,10 +2,11 @@
  * Template Component: NewScanning 
  */
     
-//import { Meteor } from 'meteor/meteor'; 
+import * as R from 'ramda';
 import { Template } from 'meteor/templating';
 import { SimpleSchema } from 'meteor/aldeed:simple-schema';
 import { ReactiveDict } from 'meteor/reactive-dict';
+import { ScheduledScans, subsScheduledScansEnv } from '/imports/api/scheduled-scans/scheduled-scans';
         
 import './new-scanning.html';     
     
@@ -18,15 +19,24 @@ Template.NewScanning.onCreated(function() {
   instance.state = new ReactiveDict();
   instance.state.setDefault({
     env: null,
+    scheduledScanId: null
   });
 
-  instance.autorun(function (env) {
+  instance.autorun(function () {
     let data = Template.currentData();
     new SimpleSchema({
       env: { type: String, optional: true },
     }).validate(data);
 
-    instance.state.set('env', env);
+    instance.state.set('env', data.env);
+  });
+
+  instance.autorun(function () {
+    let env = instance.state.get('env');
+    instance.subscribe(subsScheduledScansEnv, env);
+    ScheduledScans.find({ environment: env }).forEach((schedule) => {
+      instance.state.set('scheduledScanId', schedule._id);
+    });
   });
 });  
 
@@ -55,9 +65,14 @@ Template.NewScanning.helpers({
   },
 
   argsScheduledScan: function (env) {
+    let instance = Template.instance();
+    let scheduledScanId = instance.state.get('scheduledScanId');
+    let action = R.ifElse(R.isNil, R.always('insert'), R.always('update'))(scheduledScanId);
+
     return {
-      action: 'insert',
+      action: action,
       env: env,
+      _id: scheduledScanId,
     };
   },
 }); // end: helpers
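
argsScheduledScan above picks the form action from whether a scheduled scan already exists for the environment: 'update' with the found _id, otherwise 'insert'. A minimal sketch of that selection, assuming only Ramda; the environment names are illustrative:

import * as R from 'ramda';

function scheduledScanArgs(env, scheduledScanId) {
  // null/undefined id -> 'insert'; an existing id -> 'update'
  const action = R.ifElse(R.isNil, R.always('insert'), R.always('update'))(scheduledScanId);
  return { action: action, env: env, _id: scheduledScanId };
}

// scheduledScanArgs('apex-env', null)      => { action: 'insert', env: 'apex-env', _id: null }
// scheduledScanArgs('apex-env', 'a1b2c3')  => { action: 'update', env: 'apex-env', _id: 'a1b2c3' }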
index 6e6d3e4..44b2f79 100644 (file)
@@ -283,7 +283,7 @@ function submitItem(instance) {
     }, processActionResult.bind(null, instance));
     break;
   default:
-      // todo
+    // todo
     break;
   }
 }
index 56622dc..3970db0 100644 (file)
@@ -466,7 +466,7 @@ function submitItem(
     break;
 
   default:
-      // todo
+    // todo
     break;
   }
 }
index 2f986f5..a796562 100644 (file)
@@ -44,7 +44,7 @@
           <td>{{ scan.environment }}</td>
           <td>{{ scan.inventory }}</td>
           <td>{{ scan.object_id }}</td>
-          <td>{{ scan.frequency }}</td>
+          <td>{{ scan.freq }}</td>
           <td>{{ scan.submit_timestamp }}</td>
           <td>
             <a href="{{pathFor route='scheduled-scan' 
@@ -1,6 +1,6 @@
-<template name="Configuration">
-<div class="os-configuration cards white">
-  <h3>Configurations</h3>
+<template name="UserSettings">
+<div class="os-user-settings cards white">
+  <h3>User Settings</h3>
   <form>
     <div class="cl-field-group">
       <label class="cl-field-label">Message view backward delta</label>
@@ -1,5 +1,5 @@
 /*
- * Template Component: Configuration 
+ * Template Component: UserSettings 
  */
     
 //import { Meteor } from 'meteor/meteor'; 
@@ -8,20 +8,20 @@ import { ReactiveDict } from 'meteor/reactive-dict';
 //import { SimpleSchema } from 'meteor/aldeed:simple-schema';
 import * as R from 'ramda';
 
-import { save } from '/imports/api/configurations/methods';
-import { Configurations } from '/imports/api/configurations/configurations';
+import { save } from '/imports/api/user-settings/methods';
+import { UserSettings } from '/imports/api/user-settings/user-settings';
         
-import './configuration.html';     
+import './user-settings.html';     
     
 /*  
  * Lifecycles
  */   
   
-Template.Configuration.onCreated(function() {
+Template.UserSettings.onCreated(function() {
   let instance = this;
   instance.state = new ReactiveDict();
   instance.state.setDefault({
-    model: Configurations.schema.clean({}),
+    model: UserSettings.schema.clean({}),
     actionResult: 'none',
     message: null,
   });
@@ -36,15 +36,15 @@ Template.Configuration.onCreated(function() {
   */
 
   instance.autorun(function () {
-    instance.subscribe('configurations?user');
-    Configurations.find({user_id: Meteor.userId()}).forEach((conf) => {
-      instance.state.set('model', conf);
+    instance.subscribe('user_settings?user');
+    UserSettings.find({user_id: Meteor.userId()}).forEach((userSettings) => {
+      instance.state.set('model', userSettings);
     });
   });
 });  
 
 /*
-Template.Configuration.rendered = function() {
+Template.UserSettings.rendered = function() {
 };  
 */
 
@@ -52,7 +52,7 @@ Template.Configuration.rendered = function() {
  * Events
  */
 
-Template.Configuration.events({
+Template.UserSettings.events({
   'click .js-submit-button': function (event, instance) {
     event.preventDefault(); 
     let msgsViewBackDelta = Number.parseInt(instance.$('.sm-msgs-view-back-delta')[0].value);
@@ -71,7 +71,7 @@ Template.Configuration.events({
  * Helpers
  */
 
-Template.Configuration.helpers({    
+Template.UserSettings.helpers({    
   getModelField: function (fieldName) {
     let instance = Template.instance();
     return R.path([fieldName], instance.state.get('model'));
@@ -1,6 +1,4 @@
-/* Set the component style here */
-// "Configuration"
-.os-configuration
+.os-user-settings
   display: flex;
   flex-flow: column nowrap;
   margin: 20px;
index 0a6e7f1..4e96f9d 100644 (file)
@@ -186,8 +186,8 @@ Router.route('/messages-list', function () {
   this.render('MessagesList');
 }, { });
 
-Router.route('/configuration', function () {
-  this.render('Configuration');
+Router.route('/user-settings', function () {
+  this.render('UserSettings');
 }, { });
 
 Router.route('/message', function () {
diff --git a/ui/public/ic_device_hub_black_24dp_2x-green.png b/ui/public/ic_device_hub_black_24dp_2x-green.png
new file mode 100644 (file)
index 0000000..e196065
Binary files /dev/null and b/ui/public/ic_device_hub_black_24dp_2x-green.png differ
diff --git a/ui/public/ic_device_hub_black_24dp_2x-orange.png b/ui/public/ic_device_hub_black_24dp_2x-orange.png
new file mode 100644 (file)
index 0000000..338f89b
Binary files /dev/null and b/ui/public/ic_device_hub_black_24dp_2x-orange.png differ
diff --git a/ui/public/ic_device_hub_black_24dp_2x-red.png b/ui/public/ic_device_hub_black_24dp_2x-red.png
new file mode 100644 (file)
index 0000000..2688872
Binary files /dev/null and b/ui/public/ic_device_hub_black_24dp_2x-red.png differ
diff --git a/ui/public/ic_device_hub_black_24dp_2x.png b/ui/public/ic_device_hub_black_24dp_2x.png
new file mode 100644 (file)
index 0000000..d5d54dc
Binary files /dev/null and b/ui/public/ic_device_hub_black_24dp_2x.png differ
diff --git a/ui/public/ic_tv_black_24dp_2x.png b/ui/public/ic_tv_black_24dp_2x.png
new file mode 100644 (file)
index 0000000..64bed32
Binary files /dev/null and b/ui/public/ic_tv_black_24dp_2x.png differ
diff --git a/ui/public/ic_zoom_out_map_black_24dp_2x.png b/ui/public/ic_zoom_out_map_black_24dp_2x.png
new file mode 100644 (file)
index 0000000..4302da8
Binary files /dev/null and b/ui/public/ic_zoom_out_map_black_24dp_2x.png differ
diff --git a/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-green.png b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-green.png
new file mode 100644 (file)
index 0000000..04d3eaf
Binary files /dev/null and b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-green.png differ
diff --git a/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-orange.png b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-orange.png
new file mode 100644 (file)
index 0000000..ff9782a
Binary files /dev/null and b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-orange.png differ
diff --git a/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-red.png b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-red.png
new file mode 100644 (file)
index 0000000..2b3e34f
Binary files /dev/null and b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x-red.png differ
diff --git a/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x.png b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x.png
new file mode 100644 (file)
index 0000000..69589e4
Binary files /dev/null and b/ui/public/old-2017-09-25/ic_device_hub_black_24dp_2x.png differ
diff --git a/ui/public/old-2017-09-25/ic_tv_black_24dp_2x-old-2017-09-25.png b/ui/public/old-2017-09-25/ic_tv_black_24dp_2x-old-2017-09-25.png
new file mode 100644 (file)
index 0000000..f2ff5cd
Binary files /dev/null and b/ui/public/old-2017-09-25/ic_tv_black_24dp_2x-old-2017-09-25.png differ
diff --git a/ui/public/old-2017-09-25/ic_zoom_out_map_black_24dp_2x-old-2017-09-25.png b/ui/public/old-2017-09-25/ic_zoom_out_map_black_24dp_2x-old-2017-09-25.png
new file mode 100644 (file)
index 0000000..42db20f
Binary files /dev/null and b/ui/public/old-2017-09-25/ic_zoom_out_map_black_24dp_2x-old-2017-09-25.png differ