Add ONAP scenario 99/67199/4 master
author Harry Huang <huangxiangyu5@huawei.com>
Thu, 28 Feb 2019 06:16:34 +0000 (14:16 +0800)
committer Harry Huang <huangxiangyu5@huawei.com>
Thu, 11 Apr 2019 06:49:48 +0000 (14:49 +0800)
JIRA: -

1. Add ONAP plugin
2. Add ONAP scenario

Change-Id: I80046811207f5786049fb9829e3d9f33ea31978a
Signed-off-by: Harry Huang <huangxiangyu5@huawei.com>
deploy/conf/vm_environment/k8-nosdn-onap-noha.yml [new file with mode: 0644]
deploy/prepare.sh
plugins/onap/roles/tasks/Ubuntu.yml [new file with mode: 0644]
plugins/onap/roles/tasks/main.yml [new file with mode: 0644]
plugins/onap/roles/templates/exports.j2 [new file with mode: 0644]
plugins/onap/roles/vars/main.yml [new file with mode: 0644]

diff --git a/deploy/conf/vm_environment/k8-nosdn-onap-noha.yml b/deploy/conf/vm_environment/k8-nosdn-onap-noha.yml
new file mode 100644 (file)
index 0000000..d7b85a8
--- /dev/null
@@ -0,0 +1,46 @@
+##############################################################################
+# Copyright (c) 2016 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+---
+TYPE: virtual
+FLAVOR: cluster
+
+plugins:
+  - onap: "Enable"
+
+hosts:
+  - name: host1
+    roles:
+      - kube_master
+      - etcd
+      - ha
+
+  - name: host2
+    roles:
+      - kube_node
+
+  - name: host3
+    roles:
+      - kube_node
+
+  - name: host4
+    roles:
+      - kube_node
+
+  - name: host5
+    roles:
+      - kube_node
+
+  - name: host6
+    roles:
+      - kube_node
+
+  - name: host7
+    roles:
+      - kube_node
index 59d7682..f11ae74 100755 (executable)
@@ -53,9 +53,9 @@ function prepare_env() {
     sudo sed -i 's/^.\?tcp_port.*/tcp_port = "16509"/g' /etc/libvirt/libvirtd.conf
     sudo sed -i 's/^.\?listen_addr.*/listen_addr = "0.0.0.0"/g' /etc/libvirt/libvirtd.conf
     sudo sed -i 's/^.\?auth_tcp.*/auth_tcp = "none"/g' /etc/libvirt/libvirtd.conf
     sudo sed -i 's/^.\?tcp_port.*/tcp_port = "16509"/g' /etc/libvirt/libvirtd.conf
     sudo sed -i 's/^.\?listen_addr.*/listen_addr = "0.0.0.0"/g' /etc/libvirt/libvirtd.conf
     sudo sed -i 's/^.\?auth_tcp.*/auth_tcp = "none"/g' /etc/libvirt/libvirtd.conf
-    sudo sed -i 's/^.\?libvirtd_opts.*/libvirtd_opts = "-l"/g' /etc/default/libvirt-bin
-
+    sudo sed -i 's/^.\?libvirtd_opts.*/libvirtd_opts="-d -l"/g' /etc/default/libvirt-bin
     sudo service libvirt-bin restart
     sudo service libvirt-bin restart
+
     if sudo service openvswitch-switch status|grep stop; then
         sudo service openvswitch-switch start
     fi
     if sudo service openvswitch-switch status|grep stop; then
         sudo service openvswitch-switch start
     fi
diff --git a/plugins/onap/roles/tasks/Ubuntu.yml b/plugins/onap/roles/tasks/Ubuntu.yml
new file mode 100644 (file)
index 0000000..a51e5f1
--- /dev/null
@@ -0,0 +1,117 @@
+---
+- name: download helm
+  get_url:
+    url: "{{ helm_url }}"
+    dest: /tmp/helm.tar.gz
+  when: inventory_hostname == groups['kube_master'][0]
+  run_once: true
+
+- name: prepare helm
+  shell:
+    tar -zxf /tmp/helm.tar.gz -C /tmp;
+    mv /tmp/linux-amd64/helm /usr/local/bin/helm
+  when: inventory_hostname == groups['kube_master'][0]
+  run_once: true
+
+- name: install tiller
+  shell: >
+    kubectl create serviceaccount --namespace kube-system tiller;
+    kubectl create clusterrolebinding tiller-cluster-rule
+    --clusterrole=cluster-admin
+    --serviceaccount=kube-system:tiller;
+    helm init --service-account tiller
+  when: inventory_hostname == groups['kube_master'][0]
+  run_once: true
+
+- name: git clone oom
+  git:
+    repo: "{{ oom_repo }}"
+    dest: "{{ oom_dest }}"
+    version: "{{ oom_version }}"
+  when: inventory_hostname == groups['kube_master'][0]
+  run_once: true
+
+- name: prepare local repo
+  shell:
+    nohup /bin/sh -c "helm serve &";
+    while true; do curl -s 127.0.0.1:8879 > /dev/null; if [ $? -eq 0 ]; then break; fi; done;
+    helm repo add local http://127.0.0.1:8879
+  when: inventory_hostname == groups['kube_master'][0]
+  run_once: true
+
+- name: add helm plugin
+  shell:
+    cp -rf "{{ oom_dest }}/kubernetes/helm/plugins" ~/.helm/
+  when: inventory_hostname == groups['kube_master'][0]
+  run_once: true
+
+- name: make
+  shell:
+    make all
+  args:
+    chdir: "{{ oom_dest }}/kubernetes"
+  when: inventory_hostname == groups['kube_master'][0]
+
+- name: install nfs master
+  apt:
+    pkg: "nfs-kernel-server"
+    state: "present"
+    update_cache: "yes"
+  when: inventory_hostname == groups['kube_master'][0]
+
+- name: create /dockerdata-nfs
+  file:
+    path: /dockerdata-nfs
+    owner: nobody
+    group: nogroup
+    state: directory
+    mode: 0777
+  when: inventory_hostname == groups['kube_master'][0]
+
+- name: install nfs slave
+  apt:
+    pkg: "nfs-common"
+    state: "present"
+    update_cache: "yes"
+  when: inventory_hostname != groups['kube_master'][0]
+
+- name: create /dockerdata-nfs
+  file:
+    path: /dockerdata-nfs
+    state: directory
+  when: inventory_hostname != groups['kube_master'][0]
+
+- name: render /etc/exports
+  template:
+    src: exports.j2
+    dest: /etc/exports
+  when: inventory_hostname == groups['kube_master'][0]
+
+- name: restart nfs service
+  shell:
+    exportfs -a;
+    systemctl restart nfs-kernel-server
+  when: inventory_hostname == groups['kube_master'][0]
+
+- name: register master hostname
+  debug:
+    msg: "{{ ip_settings[groups['kube_master'][0]]['external']['ip'] }}"
+  register: master_ip
+
+- name: mount /dockerdata-nfs from kube master
+  shell:
+    mount {{ master_ip.msg }}:/dockerdata-nfs /dockerdata-nfs/
+  when: inventory_hostname != groups['kube_master'][0]
+
+# yamllint disable rule:line-length
+- name: add mount info
+  lineinfile:
+    path: /etc/fstab
+    line: "{{ master_ip.msg }}:/dockerdata-nfs /dockerdata-nfs  nfs auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0"
+  when: inventory_hostname != groups['kube_master'][0]
+# yamllint enable rule:line-length
+
+- name: deploy onap
+  shell:
+    helm deploy dev local/onap --namespace onap
+  when: inventory_hostname == groups['kube_master'][0]
diff --git a/plugins/onap/roles/tasks/main.yml b/plugins/onap/roles/tasks/main.yml
new file mode 100644 (file)
index 0000000..c9e8042
--- /dev/null
@@ -0,0 +1,11 @@
+#############################################################################
+# Copyright (c) 2019 Intel Corp.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+#############################################################################
+---
+- include: "{{ ansible_distribution }}.yml"
+  when: onap is defined and onap == "Enable"
diff --git a/plugins/onap/roles/templates/exports.j2 b/plugins/onap/roles/templates/exports.j2
new file mode 100644 (file)
index 0000000..8f5a3f6
--- /dev/null
@@ -0,0 +1 @@
+/dockerdata-nfs{% for host in groups.all %}{% if host != groups.kube_master[0] %} {{ ip_settings[host]['external']['ip'] }}(rw,sync,no_root_squash,no_subtree_check){% endif %}{% endfor %}
diff --git a/plugins/onap/roles/vars/main.yml b/plugins/onap/roles/vars/main.yml
new file mode 100644 (file)
index 0000000..83b591a
--- /dev/null
@@ -0,0 +1,13 @@
+#############################################################################
+# Copyright (c) 2019 HUAWEI TECHNOLOGIES CO.,LTD and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+helm_url: https://storage.googleapis.com/kubernetes-helm/helm-v2.9.1-linux-amd64.tar.gz
+oom_repo: https://gerrit.onap.org/r/oom
+oom_dest: /home/oom
+oom_version: casablanca