Merge "Posix compliant username is shown in booking details"
authorSawyer Bergeron <sbergeron@iol.unh.edu>
Wed, 7 Sep 2022 18:07:18 +0000 (18:07 +0000)
committerGerrit Code Review <gerrit@opnfv.org>
Wed, 7 Sep 2022 18:07:18 +0000 (18:07 +0000)
config.env.sample
laas_api_documentation.yaml
src/api/migrations/0022_add_cifile_generated_field.py [new file with mode: 0644]
src/api/urls.py
src/api/views.py
src/booking/stats.py
src/dashboard/admin_utils.py
src/static/package-lock.json

index c47f2bf..baa5b8c 100644 (file)
@@ -25,8 +25,8 @@ DB_PORT=5432
 # tells the dashboard to expect host forwarding from proxy (if using LFID, needs to be True)
 EXPECT_HOST_FORWARDING=False
 
-# string indicating what authorization to deploy with
-AUTH_SETTING=choose_auth # LFID or OAUTH
+# string indicating what authorization to deploy with (LFID)
+AUTH_SETTING=LFID
 
 # SECURITY WARNING: keep the secret key used in production secret!
 SECRET_KEY=http://www.miniwebtool.com/django-secret-key-generator/
@@ -41,15 +41,21 @@ OIDC_CLIENT_SECRET=sample_secret
 OIDC_AUTHORIZATION_ENDPOINT=https://linuxfoundation-test.auth0.com/authorize
 OIDC_TOKEN_ENDPOINT=https://linuxfoundation-test.auth0.com/oauth/token
 OIDC_USER_ENDPOINT=https://linuxfoundation-test.auth0.com/userinfo
+OIDC_AUTHENTICATION_CALLBACK_URL=https://laas.anuket.iol.org/oidc/callback
 
 CLAIMS_ENDPOINT=https://sso.linuxfoundation.org/claims/
 
 OIDC_RP_SIGN_ALGO=RS256
 OIDC_OP_JWKS_ENDPOINT=https://sso.linuxfoundation.org/.well-known/jwks.json
 
+# Jira
+JIRA_URL=https://jira.opnfv.org
+JIRA_USER_NAME=
+JIRA_USER_PASSWORD=
+
 # Rabbitmq
-DEFAULT_USER=opnfv
-DEFAULT_PASS=opnfvopnfv
+RABBITMQ_DEFAULT_USER=opnfv
+RABBITMQ_DEFAULT_PASS=opnfvopnfv
 
 # Jenkins Build Server
 JENKINS_URL=https://build.opnfv.org/ci
@@ -61,5 +67,8 @@ EMAIL_HOST_USER=
 EMAIL_HOST_PASSWORD=
 DEFAULT_FROM_EMAIL=webmaster@localhost
 
+BOOKING_EXPIRE_TIME=30
+BOOKING_MAXIMUM_NUMBER=10
 
 TEMPLATE_OVERRIDE_DIR=laas
+
index ee967b0..d8f6186 100644 (file)
@@ -115,6 +115,29 @@ paths:
          description: Cannot cancel booking
         '401':
           description: Unauthorized API key
+  '/booking/{bookingID}/details':
+    get:
+      tags:
+        - Bookings
+      summary: Get booking details
+      description: ''
+      operationId: bookingDetails
+      parameters:
+        - in: path
+          name: bookingID
+          required: true
+          type: integer
+      produces:
+        - application/json
+      responses:
+        '200':
+          description: successful operation
+          schema:
+            $ref: '#/definitions/Booking'
+        '404':
+          description: Booking does not exist
+        '401':
+          description: Unauthorized API key
   '/booking/{bookingID}/extendBooking/{days}':
     post:
       tags:
diff --git a/src/api/migrations/0022_add_cifile_generated_field.py b/src/api/migrations/0022_add_cifile_generated_field.py
new file mode 100644 (file)
index 0000000..f83a102
--- /dev/null
@@ -0,0 +1,15 @@
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ('api', '0018_cloudinitfile'),
+    ]
+
+    operations = [
+        migrations.AddField(
+            model_name="CloudInitFile",
+            name="generated",
+            field=models.BooleanField(default=False)
+        ),
+    ]
index acef947..cbb453c 100644 (file)
@@ -62,6 +62,7 @@ from api.views import (
     single_image,
     single_opsys,
     create_ci_file,
+    booking_details,
 )
 
 urlpatterns = [
@@ -93,6 +94,7 @@ urlpatterns = [
     path('booking/<int:booking_id>', specific_booking),
     path('booking/<int:booking_id>/extendBooking/<int:days>', extend_booking),
     path('booking/makeBooking', make_booking),
+    path('booking/<int:booking_id>/details', booking_details),
 
     path('resource_inventory/availableTemplates', available_templates),
     path('resource_inventory/<int:template_id>/images', images_for_template),
index 1516374..d5966ed 100644 (file)
@@ -33,7 +33,7 @@ from api.forms import DowntimeForm
 from account.models import UserProfile, Lab
 from booking.models import Booking
 from booking.quick_deployer import create_from_API
-from api.models import LabManagerTracker, get_task, Job, AutomationAPIManager, APILog
+from api.models import LabManagerTracker, get_task, Job, AutomationAPIManager, APILog, GeneratedCloudConfig
 from notifier.manager import NotificationHandler
 from analytics.models import ActiveVPNUser
 from resource_inventory.models import (
@@ -430,7 +430,11 @@ def auth_and_log(request, endpoint):
         token = Token.objects.get(key=user_token)
     except Token.DoesNotExist:
         token = None
-        response = HttpResponse('Unauthorized', status=401)
+        # A well-formed DRF token key is 40 characters; report anything else as malformed
+        if len(str(user_token)) != 40:
+            response = HttpResponse('Malformed Token', status=401)
+        else:
+            response = HttpResponse('Unauthorized', status=401)
 
     x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
     if x_forwarded_for:
@@ -650,3 +654,103 @@ def list_labs(request):
         lab_list.append(lab_info)
 
     return JsonResponse(lab_list, safe=False)
+
+
+"""
+Booking Details API Views
+"""
+
+
+def booking_details(request, booking_id=""):
+    token = auth_and_log(request, 'booking/{}/details'.format(booking_id))
+
+    if isinstance(token, HttpResponse):
+        return token
+
+    booking = get_object_or_404(Booking, pk=booking_id, owner=token.user)
+
+    # overview
+    overview = {
+        'username': GeneratedCloudConfig._normalize_username(None, str(token.user)),
+        'purpose': booking.purpose,
+        'project': booking.project,
+        'start_time': booking.start,
+        'end_time': booking.end,
+        'pod_definitions': booking.resource.template,
+        'lab': booking.lab
+    }
+
+    # deployment progress
+    task_list = []
+    for task in booking.job.get_tasklist():
+        task_info = {
+            'name': str(task),
+            'status': 'DONE',
+            'lab_response': 'No response provided (yet)'
+        }
+        if task.status < 100:
+            task_info['status'] = 'PENDING'
+        elif task.status < 200:
+            task_info['status'] = 'IN PROGRESS'
+
+        if task.message:
+            if task.type_str == "Access Task" and request.user.id != task.config.user.id:
+                task_info['lab_response'] = '--secret--'
+            else:
+                task_info['lab_response'] = str(task.message)
+        task_list.append(task_info)
+
+    # pods
+    pod_list = []
+    for host in booking.resource.get_resources():
+        pod_info = {
+            'hostname': host.config.name,
+            'machine': host.name,
+            'role': '',
+            'is_headnode': host.config.is_head_node,
+            'image': host.config.image,
+            'ram': {'amount': str(host.profile.ramprofile.first().amount) + 'G', 'channels': host.profile.ramprofile.first().channels},
+            'cpu': {'arch': host.profile.cpuprofile.first().architecture, 'cores': host.profile.cpuprofile.first().cores, 'sockets': host.profile.cpuprofile.first().cpus},
+            'disk': {'size': str(host.profile.storageprofile.first().size) + 'GiB', 'type': host.profile.storageprofile.first().media_type, 'mount_point': host.profile.storageprofile.first().name},
+            'interfaces': [],
+        }
+        try:
+            pod_info['role'] = host.template.opnfvRole
+        except Exception:
+            pass
+        for intprof in host.profile.interfaceprofile.all():
+            int_info = {
+                'name': intprof.name,
+                'speed': intprof.speed
+            }
+            pod_info['interfaces'].append(int_info)
+        pod_list.append(pod_info)
+
+    # diagnostic info
+    diagnostic_info = {
+        'job_id': booking.job.id,
+        'ci_files': '',
+        'pods': []
+    }
+    for host in booking.resource.get_resources():
+        pod = {
+            'host': host.name,
+            'configs': [],
+
+        }
+        for ci_file in host.config.cloud_init_files.all():
+            ci_info = {
+                'id': ci_file.id,
+                'text': ci_file.text
+            }
+            pod['configs'].append(ci_info)
+        diagnostic_info['pods'].append(pod)
+
+    details = {
+        'overview': overview,
+        'deployment_progress': task_list,
+        'pods': pod_list,
+        'diagnostic_info': diagnostic_info,
+        'pdf': booking.pdf
+    }
+    return JsonResponse(str(details), safe=False)
index 70f91fa..5a59d32 100644 (file)
@@ -94,6 +94,7 @@ class StatisticsManager(object):
         proj_count = sorted(Counter(projects).items(), key=lambda x: x[1])
 
         project_keys = [proj[0] for proj in proj_count[-5:]]
+        project_keys = ['None' if x is None else x for x in project_keys]
         project_counts = [proj[1] for proj in proj_count[-5:]]
 
         resources = {key: [x, value] for key, value in profiles.items()}
index 045caeb..75e4f3e 100644 (file)
@@ -27,9 +27,11 @@ from resource_inventory.models import (
 )
 
 import json
+import yaml
 import sys
 import inspect
 import pydoc
+import csv
 
 from django.contrib.auth.models import User
 
@@ -43,9 +45,7 @@ from resource_inventory.pdf_templater import PDFTemplater
 
 from booking.quick_deployer import update_template
 
-from datetime import timedelta
-
-from django.utils import timezone
+from datetime import timedelta, date, datetime, timezone
 
 from booking.models import Booking
 from notifier.manager import NotificationHandler
@@ -225,6 +225,99 @@ def get_info(host_labid, lab_username):
     return info
 
 
+class CumulativeData:
+    use_days = 0
+    count_bookings = 0
+    count_extensions = 0
+
+    def __init__(self, file_writer):
+        self.file_writer = file_writer
+
+    def account(self, booking, usage_days):
+        self.count_bookings += 1
+        self.count_extensions += booking.ext_count
+        self.use_days += usage_days
+
+    def write_cumulative(self):
+        self.file_writer.writerow([])
+        self.file_writer.writerow([])
+        self.file_writer.writerow(['Lab Use Days', 'Count of Bookings', 'Total Extensions Used'])
+        self.file_writer.writerow([self.use_days, self.count_bookings, (self.count_bookings * 2) - self.count_extensions])
+
+
+def get_years_booking_data(start_year=None, end_year=None):
+    """
+    Writes yearly booking information as a csv file, covering bookings from the
+    start of 'start_year' (default: current year) through the last day of
+    'end_year' (default: current year).
+    """
+    if start_year is None and end_year is None:
+        start = datetime.combine(date(datetime.now().year, 1, 1), datetime.min.time()).replace(tzinfo=timezone.utc)
+        end = datetime.combine(date(start.year + 1, 1, 1), datetime.min.time()).replace(tzinfo=timezone.utc)
+    elif end_year is None:
+        start = datetime.combine(date(start_year, 1, 1), datetime.min.time()).replace(tzinfo=timezone.utc)
+        end = datetime.combine(date(datetime.now().year, 1, 1), datetime.min.time()).replace(tzinfo=timezone.utc)
+    else:
+        start = datetime.combine(date(start_year, 1, 1), datetime.min.time()).replace(tzinfo=timezone.utc)
+        end = datetime.combine(date(end_year + 1, 1, 1), datetime.min.time()).replace(tzinfo=timezone.utc)
+
+    if (start.year == end.year - 1):
+        file_name = "yearly_booking_data_" + str(start.year) + ".csv"
+    else:
+        file_name = "yearly_booking_data_" + str(start.year) + "-" + str(end.year - 1) + ".csv"
+
+    with open(file_name, "w", newline="") as file:
+        file_writer = csv.writer(file)
+        cumulative_data = CumulativeData(file_writer)
+        file_writer.writerow(
+            [
+                'ID',
+                'Project',
+                'Purpose',
+                'User',
+                'Collaborators',
+                'Extensions Left',
+                'Usage Days',
+                'Start',
+                'End'
+            ]
+        )
+
+        for booking in Booking.objects.filter(start__gte=start, start__lte=end):
+            filtered = False
+            booking_filter = [279]
+            user_filter = ["ParkerBerberian", "ssmith", "ahassick", "sbergeron", "jhodgdon", "rhodgdon", "aburch", "jspewock"]
+            user = booking.owner.username if booking.owner.username is not None else "None"
+
+            for b in booking_filter:
+                if b == booking.id:
+                    filtered = True
+
+            for u in user_filter:
+                if u == user:
+                    filtered = True
+            # trim the time delta to the specified year(s) if the booking spans a year boundary
+            usage_days = ((end if booking.end > end else booking.end) - (start if booking.start < start else booking.start)).days
+            collaborators = []
+
+            for c in booking.collaborators.all():
+                collaborators.append(c.username)
+
+            if (not filtered):
+                cumulative_data.account(booking, usage_days)
+                file_writer.writerow([
+                    str(booking.id),
+                    str(booking.project),
+                    str(booking.purpose),
+                    str(booking.owner.username),
+                    ','.join(collaborators),
+                    str(booking.ext_count),
+                    str(usage_days),
+                    str(booking.start),
+                    str(booking.end)
+                ])
+        cumulative_data.write_cumulative()
+
+
 def map_cntt_interfaces(labid: str):
     """
     Use this during cntt migrations, call it with a host labid and it will change profiles for this host
@@ -351,6 +444,125 @@ def print_dict_pretty(a_dict):
     print(json.dumps(a_dict, sort_keys=True, indent=4))
 
 
+def import_host(filenames):
+    """
+    Imports host from an array of converted inspection files and if needed creates a new profile for the host.
+    NOTE: CONVERT INSPECTION FILES USING convert_inspect_results(["file", "file"])
+    (original file names not including "-import.yaml" i.e. hpe44) AND FILL IN <NEEDED FIELDS> BEFORE THIS
+    @filenames: array of host import file names to import
+    """
+
+    for filename in filenames:
+
+        # open import file
+        file = open("dashboard/" + filename + "-import.yaml", "r")
+        data = yaml.safe_load(file)
+
+        # if a new profile is needed create one and a matching template
+        if (data["new_profile"]):
+            add_profile(data)
+            print("Profile: " + data["name"] + " created!")
+            make_default_template(
+                ResourceProfile.objects.get(name=data["name"]),
+                Image.objects.get(lab_id=data["image"]).id,
+                None,
+                None,
+                False,
+                False,
+                data["owner"],
+                "unh_iol",
+                True,
+                False,
+                data["temp_desc"]
+            )
+
+            print(" Template: " + data["temp_name"] + " created!")
+
+        # add the server
+        add_server(
+            ResourceProfile.objects.get(name=data["name"]),
+            data["hostname"],
+            data["interfaces"],
+            data["lab"],
+            data["vendor"],
+            data["model"]
+        )
+
+        print(data["hostname"] + " imported!")
+
+
+def convert_inspect_results(files):
+    """
+    Converts an array of inspection result files into templates (filename-import.yaml) to be filled out for importing the servers into the dashboard
+    @files an array of file names (not including the file type. i.e hpe44). Default: []
+    """
+    for filename in files:
+        # open host inspect file
+        file = open("dashboard/" + filename + ".yaml")
+        output = open("dashboard/" + filename + "-import.yaml", "w")
+        data = json.load(file)
+
+        # gather data about disks
+        disk_data = {}
+        for i in data["disk"]:
+
+            # don't include loops in disks
+            if "loop" not in i:
+                disk_data[i["name"]] = {
+                    "capacity": i["size"][:-3],
+                    "media_type": "<\"SSD\" or \"HDD\">",
+                    "interface": "<\"sata\", \"sas\", \"ssd\", \"nvme\", \"scsi\", or \"iscsi\">",
+                }
+
+        # gather interface data
+        interface_data = {}
+        for i in data["interfaces"]:
+            interface_data[data["interfaces"][i]["name"]] = {
+                "speed": data["interfaces"][i]["speed"],
+                "nic_type": "<\"onboard\" or \"pcie\">",
+                "order": "<order in switch>",
+                "mac_address": data["interfaces"][i]["mac"],
+                "bus_addr": data["interfaces"][i]["busaddr"],
+            }
+
+        # gather cpu data
+        cpu_data = {
+            "cores": data["cpu"]["cores"],
+            "architecture": data["cpu"]["arch"],
+            "cpus": data["cpu"]["cpus"],
+            "cflags": "<cflags string>",
+        }
+
+        # gather ram data
+        ram_data = {
+            "amount": data["memory"][:-1],
+            "channels": "<int of ram channels used>",
+        }
+
+        # assemble data for host import file
+        import_data = {
+            "new_profile": "<True or False> (Set to True to create a new profile for the host's type)",
+            "name": "<profile name> (Used to set the profile of a host and for creating a new profile)",
+            "description": "<profile description>",
+            "labs": "<labs using profile>",
+            "temp_name": "<Template name>",
+            "temp_desc": "<template description>",
+            "image": "<image lab_id>",
+            "owner": "<template owner>",
+            "hostname": data["hostname"],
+            "lab": "<lab server is in> (i.e. \"unh_iol\")",
+            "disks": disk_data,
+            "interfaces": interface_data,
+            "cpus": cpu_data,
+            "ram": ram_data,
+            "vendor": "<host vendor>",
+            "model": "<host model>",
+        }
+
+        # export data as yaml
+        yaml.dump(import_data, output)
+
+
 def add_profile(data):
     """
     Used for adding a host profile to the dashboard
index f8eabe4..89a26db 100644 (file)
@@ -1,8 +1,97 @@
 {
   "name": "laas",
   "version": "1.0.0",
-  "lockfileVersion": 1,
+  "lockfileVersion": 2,
   "requires": true,
+  "packages": {
+    "": {
+      "name": "laas",
+      "version": "1.0.0",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@fortawesome/fontawesome-free": "^5.12.0",
+        "bootstrap": "^4.4.1",
+        "datatables.net-bs4": "^1.10.20",
+        "datatables.net-responsive-bs4": "^2.2.3",
+        "jquery": "^3.4.1",
+        "mxgraph": "^4.0.6",
+        "plotly.js-dist": "^1.51.3",
+        "popper.js": "^1.16.0"
+      }
+    },
+    "node_modules/@fortawesome/fontawesome-free": {
+      "version": "5.12.0",
+      "resolved": "https://registry.npmjs.org/@fortawesome/fontawesome-free/-/fontawesome-free-5.12.0.tgz",
+      "integrity": "sha512-vKDJUuE2GAdBERaQWmmtsciAMzjwNrROXA5KTGSZvayAsmuTGjam5z6QNqNPCwDfVljLWuov1nEC3mEQf/n6fQ==",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/bootstrap": {
+      "version": "4.4.1",
+      "resolved": "https://registry.npmjs.org/bootstrap/-/bootstrap-4.4.1.tgz",
+      "integrity": "sha512-tbx5cHubwE6e2ZG7nqM3g/FZ5PQEDMWmMGNrCUBVRPHXTJaH7CBDdsLeu3eCh3B1tzAxTnAbtmrzvWEvT2NNEA==",
+      "engines": {
+        "node": ">=6"
+      }
+    },
+    "node_modules/datatables.net": {
+      "version": "1.10.20",
+      "resolved": "https://registry.npmjs.org/datatables.net/-/datatables.net-1.10.20.tgz",
+      "integrity": "sha512-4E4S7tTU607N3h0fZPkGmAtr9mwy462u+VJ6gxYZ8MxcRIjZqHy3Dv1GNry7i3zQCktTdWbULVKBbkAJkuHEnQ==",
+      "dependencies": {
+        "jquery": "3.4.1"
+      }
+    },
+    "node_modules/datatables.net-bs4": {
+      "version": "1.10.20",
+      "resolved": "https://registry.npmjs.org/datatables.net-bs4/-/datatables.net-bs4-1.10.20.tgz",
+      "integrity": "sha512-kQmMUMsHMOlAW96ztdoFqjSbLnlGZQ63iIM82kHbmldsfYdzuyhbb4hTx6YNBi481WCO3iPSvI6YodNec46ZAw==",
+      "dependencies": {
+        "datatables.net": "1.10.20",
+        "jquery": "3.4.1"
+      }
+    },
+    "node_modules/datatables.net-responsive": {
+      "version": "2.2.3",
+      "resolved": "https://registry.npmjs.org/datatables.net-responsive/-/datatables.net-responsive-2.2.3.tgz",
+      "integrity": "sha512-8D6VtZcyuH3FG0Hn5A4LPZQEOX3+HrRFM7HjpmsQc/nQDBbdeBLkJX4Sh/o1nzFTSneuT1Wh/lYZHVPpjcN+Sw==",
+      "dependencies": {
+        "datatables.net": "1.10.20",
+        "jquery": "3.4.1"
+      }
+    },
+    "node_modules/datatables.net-responsive-bs4": {
+      "version": "2.2.3",
+      "resolved": "https://registry.npmjs.org/datatables.net-responsive-bs4/-/datatables.net-responsive-bs4-2.2.3.tgz",
+      "integrity": "sha512-SQaWI0uLuPcaiBBin9zX+MuQfTSIkK1bYxbXqUV6NLkHCVa6PMQK7Rvftj0ywG4R7uOtjbzY8nSVqxEKvQI0Vg==",
+      "dependencies": {
+        "datatables.net-bs4": "1.10.20",
+        "datatables.net-responsive": "2.2.3",
+        "jquery": "3.4.1"
+      }
+    },
+    "node_modules/jquery": {
+      "version": "3.4.1",
+      "resolved": "https://registry.npmjs.org/jquery/-/jquery-3.4.1.tgz",
+      "integrity": "sha512-36+AdBzCL+y6qjw5Tx7HgzeGCzC81MDDgaUP8ld2zhx58HdqXGoBd+tHdrBMiyjGQs0Hxs/MLZTu/eHNJJuWPw=="
+    },
+    "node_modules/mxgraph": {
+      "version": "4.0.6",
+      "resolved": "https://registry.npmjs.org/mxgraph/-/mxgraph-4.0.6.tgz",
+      "integrity": "sha512-5XZXeAkA4k6n4BS05Fxd2cNhMw+3dnlRqAaLtsuXdT0g8BvvEa1VT4jjuGtUW4QTt38Q+I2Dr/3EWiAaGRfAXw=="
+    },
+    "node_modules/plotly.js-dist": {
+      "version": "1.51.3",
+      "resolved": "https://registry.npmjs.org/plotly.js-dist/-/plotly.js-dist-1.51.3.tgz",
+      "integrity": "sha512-Bxz0XBg963gpnbt7FVPEhYvT33JsaKa0hEozXBnQZkiKtsiM2M1lZN6tkEHmq6o1N2K6qJXFtdzCXbZ/hLGV0Q=="
+    },
+    "node_modules/popper.js": {
+      "version": "1.16.0",
+      "resolved": "https://registry.npmjs.org/popper.js/-/popper.js-1.16.0.tgz",
+      "integrity": "sha512-+G+EkOPoE5S/zChTpmBSSDYmhXJ5PsW8eMhH8cP/CQHMFPBG/kC9Y5IIw6qNYgdJ+/COf0ddY2li28iHaZRSjw=="
+    }
+  },
   "dependencies": {
     "@fortawesome/fontawesome-free": {
       "version": "5.12.0",