adding ceph support for Kubernetes. 87/46487/10
author Narinder Gupta <narinder.gupta@canonical.com>
Tue, 31 Oct 2017 15:53:52 +0000 (10:53 -0500)
committer Narinder Gupta <narinder.gupta@canonical.com>
Wed, 1 Nov 2017 03:07:28 +0000 (22:07 -0500)
Change-Id: If7cf0add214da153daf39967fac15ba027cc3885
Signed-off-by: Narinder Gupta <narinder.gupta@canonical.com>
ci/02-deploybundle.sh
ci/03-maasdeploy.sh
ci/config_tpl/juju2/bundlek8_tpl/bundle.yaml
ci/config_tpl/juju2/bundlek8_tpl/ceph.yaml [new file with mode: 0644]
ci/config_tpl/juju2/bundlek8_tpl/relations.yaml
ci/config_tpl/juju2/bundlek8_tpl/spaces.yaml
ci/default_deployment_config.yaml
ci/deploy.sh
ci/genK8Bundle.py
ci/kubernetes/fetch-charms.sh
labconfig/enea/virtual/pod1/labconfig.yaml [new file with mode: 0644]

index d86d1e0..8decae1 100755 (executable)
@@ -63,8 +63,8 @@ if [[ "$opnfvmodel" = "openstack" ]]; then
     if [ -e ./deployconfig.yaml ]; then
        extport=`grep "ext-port" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //' | tr ',' ' '`
        datanet=`grep "dataNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
-       admnet=`grep "admNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
        cephdisk=`grep "ceph-disk" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
+       admnet=`grep "admNetwork" deployconfig.yaml | cut -d ' ' -f 4 | sed -e 's/ //'`
        osdomname=`grep "os-domain-name" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
     fi
 
@@ -91,6 +91,8 @@ if [[ "$opnfvmodel" = "openstack" ]]; then
     else
         sed -i "s/cpu_pin_set: all/cpu_pin_set: 1/g" default_deployment_config.yaml
     fi
+else
+    cephdisk=`grep "ceph-disk" deployconfig.yaml | cut -d ':' -f 2 | sed -e 's/ //'`
 fi
 
 case "$opnfvlab" in
index 992be83..7f587aa 100755 (executable)
@@ -46,18 +46,26 @@ NODE_ARC="$NODE_ARCHES/generic"
 # Install the packages needed
 echo_info "Installing and upgrading required packages"
 #sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 5EDB1B62EC4926EA
-sudo apt-get update -y
+sudo apt-get update -y || true
 sudo apt-get install software-properties-common -y
 sudo apt-add-repository ppa:juju/stable -y
 sudo apt-add-repository ppa:maas/stable -y
-sudo apt-add-repository cloud-archive:ocata -y
-sudo apt-get update -y
+sudo apt-add-repository cloud-archive:pike -y
+if [ "aarch64" == "$NODE_ARCTYPE" ]; then
+sudo add-apt-repository ppa:ubuntu-cloud-archive/pike-staging -y
+fi
+sudo apt-get update -y || true
 #sudo apt-get dist-upgrade -y
+
 sudo apt-get install bridge-utils openssh-server bzr git virtinst qemu-kvm libvirt-bin \
              maas maas-region-controller juju python-pip python-psutil python-openstackclient \
              python-congressclient gsutil charm-tools pastebinit python-jinja2 sshpass \
              openssh-server vlan ipmitool jq expect snap -y
 
+if [ "aarch64" == "$NODE_ARCTYPE" ]; then
+    sudo apt-get install qemu qemu-efi qemu-system-aarch64 -y
+fi
+
 sudo -H pip install --upgrade pip
 
 
@@ -226,9 +234,7 @@ sudo cat $HOME/.ssh/id_rsa.pub >> $HOME/.ssh/authorized_keys
 #
 configuremaas(){
     #reconfigure maas with correct MAAS address.
-    #Below code is needed as MAAS have issue in commissioning without restart.
-    #sudo ./maas-reconfigure-region.sh $MAAS_IP
-    sleep 30
+
     sudo maas-rack config --region-url http://$MAAS_IP:5240/MAAS
 
     sudo maas createadmin --username=ubuntu --email=ubuntu@ubuntu.com --password=ubuntu || true
@@ -371,6 +377,8 @@ addnodes(){
     API_KEY=`sudo maas-region apikey --username=ubuntu`
     maas login $PROFILE $API_SERVERMAAS $API_KEY
 
+    maas $PROFILE maas set-config name=default_min_hwe_kernel value=hwe-16.04-edge || true
+
     # make sure there is no machine entry in maas
     for m in $(maas $PROFILE machines read | jq -r '.[].system_id')
     do
@@ -427,8 +435,8 @@ addnodes(){
 
     virt-install --connect $VIRSHURL --name bootstrap --ram 4098 --cpu $CPU_MODEL --vcpus 2 \
                  --disk size=20,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
-                 $netw --boot network,hd,menu=off --noautoconsole \
-                 --print-xml | tee bootstrap
+                 $netw --boot network,hd,menu=off --video virtio --noautoconsole --autostart \
+                 --accelerate --print-xml | tee bootstrap
 
     if [ "$virtinstall" -eq 1 ]; then
         bootstrapmac=`grep  "mac address" bootstrap | head -1 | cut -d '"' -f 2`
@@ -440,7 +448,6 @@ addnodes(){
         done
     fi
     virsh -c $VIRSHURL define --file bootstrap
-    virsh -c $VIRSHURL autostart bootstrap
 
     rm -f bootstrap
 
@@ -464,11 +471,11 @@ addnodes(){
 
             virt-install --connect $VIRSHURL --name $NODE_NAME --ram 8192 --cpu $CPU_MODEL --vcpus 4 \
                      --disk size=120,format=qcow2,bus=virtio,cache=directsync,io=native,pool=default \
-                     $netw $netw --boot network,hd,menu=off --noautoconsole --print-xml | tee $NODE_NAME
+                     $netw $netw --boot network,hd,menu=off --video virtio --noautoconsole --autostart \
+                     --accelerate --print-xml | tee $NODE_NAME
 
             nodemac=`grep  "mac address" $NODE_NAME | head -1 | cut -d '"' -f 2`
             virsh -c $VIRSHURL define --file $NODE_NAME
-            virsh -c $VIRSHURL autostart $NODE_NAME
 
             rm -f $NODE_NAME
             maas $PROFILE machines create autodetect_nodegroup='yes' name=$NODE_NAME \
@@ -542,7 +549,7 @@ setupspacenetwork
 
 #just make sure rack controller has been synced and import only
 # just whether images have been imported or not.
-sudo ./maas-reconfigure-region.sh $MAAS_IP
+#sudo ./maas-reconfigure-region.sh $MAAS_IP
 sleep 120
 
 # Let's add the nodes now. Currently works only for virtual deployment.
index b4ed14a..ef5bbe3 100644 (file)
@@ -26,7 +26,9 @@
 {% else %}
 {% include 'flannel.yaml' %}
 {% endif %}
-
+{% if k8.feature.storage == 'ceph' %}
+{% include 'ceph.yaml' %}
+{% endif %}
 
   relations:
     - [ 'ntp:juju-info', 'nodes:juju-info' ]
diff --git a/ci/config_tpl/juju2/bundlek8_tpl/ceph.yaml b/ci/config_tpl/juju2/bundlek8_tpl/ceph.yaml
new file mode 100644 (file)
index 0000000..5614903
--- /dev/null
@@ -0,0 +1,35 @@
+
+    ceph-mon:
+      charm: "./{{ ubuntu.release }}/ceph-mon"
+      num_units: {{ unit_ceph_qty() }}
+{% if os.service.bindings %}
+      bindings:
+        "": *oam-space
+        public: *ceph-public-space
+        cluster: *ceph-cluster-space
+{% endif %}
+      options:
+        expected-osd-count: {{ unit_ceph_qty() }}
+      to:
+{% for unit_id in to_select(unit_ceph_qty()) %}
+        - "lxd:nodes/{{ unit_id }}"
+{% endfor %}
+
+    ceph-osd:
+      charm: "./{{ ubuntu.release }}/ceph-osd"
+      num_units: {{ opnfv.units }}
+{% if os.service.bindings %}
+      bindings:
+        "": *oam-space
+        public: *ceph-public-space
+        cluster: *ceph-cluster-space
+{% endif %}
+      options:
+        osd-devices: *osd-devices
+        osd-journal: *osd-journal
+        osd-reformat: 'yes'
+      to:
+{% for unit_id in range(0, opnfv.units) %}
+        - "nodes/{{ unit_id }}"
+{% endfor %}
+
index 4401820..42c9e13 100644 (file)
@@ -21,4 +21,7 @@
     - [ 'kubernetes-worker:kube-api-endpoint', 'kubeapi-load-balancer:website' ]
     - [ 'kubeapi-load-balancer:certificates', 'easyrsa:client' ]
 {% endif %}
-
+{% if k8.feature.storage == 'ceph' %}
+    - [ 'ceph-osd:mon', 'ceph-mon:osd' ]
+    - [ 'ceph-mon:admin', 'kubernetes-master:ceph-storage' ]
+{% endif %}
index 17dbd7d..01afb34 100644 (file)
@@ -1,5 +1,24 @@
-
-
     # OAM - Operations, Administration and Maintenance
     oam-space:           &oam-space           internal-api
 
+    # CEPH configuration
+    # CEPH access network
+{% if opnfv.spaces_dict.storageaccess is defined %}
+    ceph-public-space:   &ceph-public-space  storage-access-space
+    ceph-access-constr:  &ceph-access-constr  spaces=storage-access-space
+{% else %}
+    ceph-public-space:   &ceph-public-space  internal-api
+    ceph-access-constr:  &ceph-access-constr  spaces=internal-api
+{% endif %}
+
+    # CEPH replication network
+{% if opnfv.spaces_dict.storage is defined %}
+    ceph-cluster-space:  &ceph-cluster-space  storage-cluster
+{% else %}
+    ceph-cluster-space:  &ceph-cluster-space  internal-api
+{% endif %}
+
+    # CEPH OSD and journal devices; temporary workaround for #1674148
+    osd-devices:         &osd-devices         {{ opnfv.storage_dict.ceph.disk }}
+    osd-journal:         &osd-journal
+
index 0ea2a43..0a0f055 100644 (file)
@@ -1,10 +1,10 @@
 ubuntu:
     release: xenial
 os:
-    release: ocata
+    release: pike
     git_repo:
         origin_git: False
-        branch: ocata
+        branch: pike
     hyperconverged: True
     ha:
         mode: ha
@@ -39,5 +39,6 @@ os:
 k8:
     feature:
         loadbalancer: False
+        storage: none
     network:
         controller: nosdn
index 560807f..497d9ed 100755 (executable)
@@ -9,7 +9,7 @@ source common/tools.sh
 
 opnfvsdn=nosdn
 opnfvtype=noha
-openstack=ocata
+openstack=pike
 opnfvlab=default
 opnfvlabfile=
 opnfvrel=e
@@ -23,7 +23,7 @@ maasinstall=0
 usage() { echo "Usage: $0
     [-s|--sdn <nosdn|odl|opencontrail>]
     [-t|--type <noha|ha|tip>]
-    [-o|--openstack <ocata>]
+    [-o|--openstack <ocata|pike>]
     [-l|--lab <default|custom>]
     [-f|--feature <ipv6,dpdk,lxd,dvr,openbaton,multus>]
     [-d|--distro <xenial>]
index f0198d7..688d18e 100644 (file)
@@ -161,6 +161,8 @@ if 'dpdk' in features:
     config['os']['network']['dpdk'] = True
 if 'lb' in features:
     config['k8']['feature']['loadbalancer'] = True
+if 'ceph' in features:
+    config['k8']['feature']['storage'] = 'ceph'
 
 # change ha mode
 config['k8']['network']['controller'] = sdn
index 255f1eb..fd784ba 100755 (executable)
@@ -11,4 +11,6 @@ function build {
 
 # openstack
 bzr branch lp:~narindergupta/opnfv/ntp $distro/ntp
+git clone -b stable/17.08 https://github.com/openstack/charm-ceph-mon.git $distro/ceph-mon
+git clone -b stable/17.08 https://github.com/openstack/charm-ceph-osd.git $distro/ceph-osd
 
diff --git a/labconfig/enea/virtual/pod1/labconfig.yaml b/labconfig/enea/virtual/pod1/labconfig.yaml
new file mode 100644 (file)
index 0000000..cc64acc
--- /dev/null
@@ -0,0 +1,54 @@
+lab:
+  location: virtual
+  racks:
+  - rack: pod1
+    nodes:
+    - name: rack-vir-m1
+      architecture: x86_64
+      roles: [network,control]
+      nics:
+      - ifname: ens3
+        spaces: [admin]
+      - ifname: ens4
+        spaces: [floating]
+    - name: rack-vir-m2
+      architecture: x86_64
+      roles: [compute,control,storage]
+      nics:
+      - ifname: ens3
+        spaces: [admin]
+      - ifname: ens4
+        spaces: [floating]
+    - name: rack-vir-m3
+      architecture: x86_64
+      roles: [compute,control,storage]
+      nics:
+      - ifname: ens3
+        spaces: [admin]
+      - ifname: ens4
+        spaces: [floating]
+    floating-ip-range: 192.168.122.31,192.168.122.49,192.168.122.1,192.168.122.0/24
+    ext-port: "enp2s0"
+    dns: 8.8.8.8
+opnfv:
+  release: d
+  distro: xenial
+  type: noha
+  openstack: newton
+  sdncontroller:
+  - type: nosdn
+  storage:
+  - type: ceph
+    disk: /srv
+  feature: odl_l2
+  spaces:
+  - type: admin
+    bridge: virbr0
+    cidr: 192.168.122.0/24
+    gateway: 192.168.122.1
+    vlan:
+  - type: floating
+    bridge:
+    cidr:
+    gateway:
+    vlan: