support virtlet in container4nfv 61/45161/1
author Guo Ruijing <ruijing.guo@intel.com>
Sun, 15 Oct 2017 18:21:22 +0000 (11:21 -0700)
committer Guo Ruijing <ruijing.guo@intel.com>
Sun, 15 Oct 2017 18:30:37 +0000 (11:30 -0700)
Change-Id: I913149ecf374a0a8f0d0bbb65aff52854401bbd6
Signed-off-by: Guo Ruijing <ruijing.guo@intel.com>
13 files changed:
src/vagrant/kubeadm_virtlet/Vagrantfile [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/examples/images.yaml [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/examples/nginx-app.sh [new file with mode: 0755]
src/vagrant/kubeadm_virtlet/examples/nginx-app.yaml [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/examples/virtlet.sh [new file with mode: 0755]
src/vagrant/kubeadm_virtlet/host_setup.sh [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/master_setup.sh [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf [new file with mode: 0644]
src/vagrant/kubeadm_virtlet/worker_setup.sh [new file with mode: 0644]

diff --git a/src/vagrant/kubeadm_virtlet/Vagrantfile b/src/vagrant/kubeadm_virtlet/Vagrantfile
new file mode 100644 (file)
index 0000000..aa3792f
--- /dev/null
@@ -0,0 +1,31 @@
+$num_workers=3
+
+Vagrant.require_version ">= 1.8.6"
+Vagrant.configure("2") do |config|
+
+  config.vm.box = "bento/ubuntu-16.04"
+  config.vm.provision "shell", path: "host_setup.sh", privileged: false
+  config.vm.provider :virtualbox do |vb|
+    vb.customize ["modifyvm", :id, "--memory", 4096]
+    vb.customize ["modifyvm", :id, "--cpus", 4]
+    vb.customize ["modifyvm", :id, "--nicpromisc3", "allow-all"]
+    vb.customize "post-boot",["controlvm", :id, "setlinkstate1", "on"]
+  end
+
+  config.vm.define "master" do |config|
+    config.vm.hostname = "master"
+    config.vm.provision "shell", path: "master_setup.sh", privileged: false
+    config.vm.network :private_network, ip: "10.96.0.10"
+    config.vm.network :private_network, ip: "10.244.0.10"
+  end
+
+  (1 .. $num_workers).each do |i|
+    config.vm.define vm_name = "worker%d" % [i] do |config|
+      config.vm.hostname = vm_name
+      config.vm.provision "shell", path: "worker_setup.sh", privileged: false
+      config.vm.network :private_network, ip: "10.96.0.#{i+20}"
+      config.vm.network :private_network, ip: "10.244.0.#{i+20}"
+    end
+  end
+
+end
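
For context (not part of this change), the standard Vagrant workflow for this topology, assuming VirtualBox as the provider:

    cd src/vagrant/kubeadm_virtlet
    vagrant up            # boots master plus worker1..worker3 and runs the provision scripts
    vagrant ssh master    # log in to the Kubernetes master node
    vagrant destroy -f    # tear the cluster down
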
diff --git a/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml b/src/vagrant/kubeadm_virtlet/examples/cirros-vm.yaml
new file mode 100644 (file)
index 0000000..8beb03f
--- /dev/null
@@ -0,0 +1,57 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cirros-vm
+  annotations:
+    # This tells CRI Proxy that this pod belongs to the Virtlet runtime
+    kubernetes.io/target-runtime: virtlet
+    # An optional annotation specifying the count of virtual CPUs.
+    # Note that annotation values must always be strings,
+    # thus numeric values need to be quoted.
+    # Defaults to "1".
+    VirtletVCPUCount: "1"
+    # CirrOS doesn't load nocloud data from SCSI CD-ROM for some reason
+    VirtletDiskDriver: virtio
+    # inject ssh keys via cloud-init
+    VirtletSSHKeys: |
+      ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCaJEcFDXEK2ZbX0ZLS1EIYFZRbDAcRfuVjpstSc0De8+sV1aiu+dePxdkuDRwqFtCyk6dEZkssjOkBXtri00MECLkir6FcH3kKOJtbJ6vy3uaJc9w1ERo+wyl6SkAh/+JTJkp7QRXj8oylW5E20LsbnA/dIwWzAF51PPwF7A7FtNg9DnwPqMkxFo1Th/buOMKbP5ZA1mmNNtmzbMpMfJATvVyiv3ccsSJKOiyQr6UG+j7sc/7jMVz5Xk34Vd0l8GwcB0334MchHckmqDB142h/NCWTr8oLakDNvkfC1YneAfAO41hDkUbxPtVBG5M/o7P4fxoqiHEX+ZLfRxDtHB53 me@localhost
+    # cloud-init user data
+    VirtletCloudInitUserDataScript: |
+      #!/bin/sh
+      echo "Hi there"
+spec:
+  # This nodeAffinity specification tells Kubernetes to run this
+  # pod only on nodes that carry the extraRuntime=virtlet label.
+  # The Virtlet DaemonSet uses the same label to select the nodes
+  # that must run the Virtlet runtime.
+  affinity:
+    nodeAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        nodeSelectorTerms:
+        - matchExpressions:
+          - key: extraRuntime
+            operator: In
+            values:
+            - virtlet
+  containers:
+  - name: cirros-vm
+    # This specifies the image to use.
+    # The virtlet/ prefix is used by the CRI proxy; the remaining part
+    # of the image name is prepended with https:// and used to download the image.
+    image: virtlet/cirros
+    # Virtlet currently ignores image tags, but their meaning may change
+    # in future, so it’s better not to set them for VM pods. If there’s no tag
+    # provided in the image specification kubelet defaults to
+    # imagePullPolicy: Always, which means that the image is always
+    # redownloaded when the pod is created. In order to make pod creation
+    # faster and more reliable, we set imagePullPolicy to IfNotPresent here
+    # so a previously downloaded image is reused if there is one
+    # in Virtlet’s image store
+    imagePullPolicy: IfNotPresent
+    # tty and stdin required for `kubectl attach -t` to work
+    tty: true
+    stdin: true
+    resources:
+      limits:
+        # This memory limit is applied to the libvirt domain definition
+        memory: 160Mi
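
As the comments above note, tty and stdin are enabled so that `kubectl attach -t` works. A minimal sketch of interacting with the VM pod once it is scheduled (plain kubectl, not part of this change):

    kubectl get pod cirros-vm -o wide    # wait until STATUS is Running
    kubectl attach -it cirros-vm         # attach to the CirrOS VM console
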
diff --git a/src/vagrant/kubeadm_virtlet/examples/images.yaml b/src/vagrant/kubeadm_virtlet/examples/images.yaml
new file mode 100644 (file)
index 0000000..3a84585
--- /dev/null
@@ -0,0 +1,3 @@
+translations:
+  - name: cirros
+    url: http://github.com/mirantis/virtlet/releases/download/v0.8.2/cirros.img
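
This translation is what lets cirros-vm.yaml reference the image simply as virtlet/cirros. virtlet.sh below loads the file into the virtlet-image-translations ConfigMap; a quick way to verify it was picked up:

    kubectl get configmap virtlet-image-translations -n kube-system -o yaml
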
diff --git a/src/vagrant/kubeadm_virtlet/examples/nginx-app.sh b/src/vagrant/kubeadm_virtlet/examples/nginx-app.sh
new file mode 100755 (executable)
index 0000000..bfd0613
--- /dev/null
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+kubectl create -f /vagrant/examples/nginx-app.yaml
+kubectl get nodes
+kubectl get services
+kubectl get pods
+kubectl get rc
+sleep 120
+svcip=$(kubectl get services nginx  -o json | grep clusterIP | cut -f4 -d'"')
+wget http://$svcip
diff --git a/src/vagrant/kubeadm_virtlet/examples/nginx-app.yaml b/src/vagrant/kubeadm_virtlet/examples/nginx-app.yaml
new file mode 100644 (file)
index 0000000..f80881a
--- /dev/null
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+spec:
+  type: NodePort
+  ports:
+  - port: 80
+    protocol: TCP
+    name: http
+  selector:
+    app: nginx
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: nginx
+spec:
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx
+        ports:
+        - containerPort: 80
diff --git a/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml b/src/vagrant/kubeadm_virtlet/examples/virtlet-ds.yaml
new file mode 100644 (file)
index 0000000..ed037d9
--- /dev/null
@@ -0,0 +1,360 @@
+---
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: virtlet
+  namespace: kube-system
+spec:
+  template:
+    metadata:
+      name: virtlet
+      labels:
+        runtime: virtlet
+    spec:
+      hostNetwork: true
+      dnsPolicy: ClusterFirstWithHostNet
+      # hostPID is true to (1) let VMs survive a virtlet container restart
+      # (to be checked) and (2) allow the use of nsenter in the init container
+      hostPID: true
+      # the bootstrap procedure needs to create a ConfigMap in the kube-system namespace
+      serviceAccountName: virtlet
+
+      # only run Virtlet pods on nodes that have the extraRuntime=virtlet label
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: extraRuntime
+                operator: In
+                values:
+                - virtlet
+
+      initContainers:
+      # The init container first copies virtlet's flexvolume driver
+      # to the default kubelet plugin dir so that it is in the proper place
+      # by the time kubelet is restarted by the CRI proxy bootstrap procedure.
+      # After that it checks whether there is already a saved kubelet config
+      # and, if so, considers the CRI proxy bootstrap to be done already.
+      # If there isn't, it drops the criproxy binary into /opt/criproxy/bin
+      # (unless it is already there) and then starts the criproxy installation.
+      # The option of placing the criproxy binary into /opt/criproxy/bin
+      # in advance may be helpful for the purpose of
+      # debugging criproxy.
+      # At the end it ensures that /var/lib/libvirt/images exists on the node.
+      - name: prepare-node
+        image: openretriever/virtlet
+        imagePullPolicy: IfNotPresent
+        command:
+        - /prepare-node.sh
+        volumeMounts:
+        - name: k8s-flexvolume-plugins-dir
+          mountPath: /kubelet-volume-plugins
+        - name: criproxybin
+          mountPath: /opt/criproxy/bin
+        - name: run
+          mountPath: /run
+        - name: dockersock
+          mountPath: /var/run/docker.sock
+        - name: criproxyconf
+          mountPath: /etc/criproxy
+        - name: log
+          mountPath: /hostlog
+        # for ensuring that /var/lib/libvirt/images exists on node
+        - name: var-lib
+          mountPath: /host-var-lib
+        securityContext:
+          privileged: true
+
+      containers:
+      - name: libvirt
+        image: openretriever/virtlet
+        # In case we inject a local virtlet image, we want to use it rather than the officially available one
+        imagePullPolicy: IfNotPresent
+        command:
+        - /libvirt.sh
+        volumeMounts:
+        - mountPath: /sys/fs/cgroup
+          name: cgroup
+        - mountPath: /lib/modules
+          name: modules
+          readOnly: true
+        - mountPath: /boot
+          name: boot
+          readOnly: true
+        - mountPath: /run
+          name: run
+        - mountPath: /var/lib/virtlet
+          name: virtlet
+        - mountPath: /var/lib/libvirt
+          name: libvirt
+        - mountPath: /var/run/libvirt
+          name: libvirt-sockets
+        # the log dir is needed here because otherwise libvirt will produce errors
+        # like this:
+        # Unable to pre-create chardev file '/var/log/vms/afd75bbb-8e97-11e7-9561-02420ac00002/cirros-vm_0.log': No such file or directory
+        - name: vms-log
+          mountPath: /var/log/vms
+        - name: dev
+          mountPath: /dev
+        securityContext:
+          privileged: true
+        env:
+        - name: VIRTLET_DISABLE_KVM
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: disable_kvm
+              optional: true
+      - name: virtlet
+        image: openretriever/virtlet
+        # In case we inject a local virtlet image, we want to use it rather than the officially available one
+        imagePullPolicy: IfNotPresent
+        volumeMounts:
+        - mountPath: /run
+          name: run
+        # /boot and /lib/modules are required by supermin
+        - mountPath: /lib/modules
+          name: modules
+          readOnly: true
+        - mountPath: /boot
+          name: boot
+          readOnly: true
+        - mountPath: /var/lib/virtlet
+          name: virtlet
+        - mountPath: /var/lib/libvirt
+          name: libvirt
+        - mountPath: /etc/cni
+          name: cniconf
+        - mountPath: /opt/cni/bin
+          name: cnibin
+        - mountPath: /var/run/libvirt
+          name: libvirt-sockets
+        - mountPath: /var/lib/cni
+          name: cnidata
+        - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+          name: k8s-flexvolume-plugins-dir
+          # The `:shared` suffix below is an unofficial way to pass this mount
+          # option to docker, which then allows virtlet to see what kubelet mounts
+          # in the underlying directories after the virtlet container is created
+        - mountPath: /var/lib/kubelet/pods:shared
+          name: k8s-pods-dir
+        - name: vms-log
+          mountPath: /var/log/vms
+        - mountPath: /etc/virtlet/images
+          name: image-name-translations
+        - name: pods-log
+          mountPath: /kubernetes-log
+        securityContext:
+          privileged: true
+        env:
+        - name: VIRTLET_DISABLE_KVM
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: disable_kvm
+              optional: true
+        - name: VIRTLET_DOWNLOAD_PROTOCOL
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: download_protocol
+              optional: true
+        - name: VIRTLET_LOGLEVEL
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: loglevel
+              optional: true
+        - name: VIRTLET_CALICO_SUBNET
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: calico-subnet
+              optional: true
+        - name: IMAGE_REGEXP_TRANSLATION
+          valueFrom:
+            configMapKeyRef:
+              name: virtlet-config
+              key: image_regexp_translation
+              optional: true
+        - name: IMAGE_TRANSLATIONS_DIR
+          value: /etc/virtlet/images
+        - name: KUBERNETES_POD_LOGS
+          value: "/kubernetes-log"
+        # TODO: should we rename it?
+        - name: VIRTLET_VM_LOG_LOCATION
+          value: "1"
+      - name: vms
+        image: openretriever/virtlet
+        imagePullPolicy: IfNotPresent
+        command:
+        - /vms.sh
+        volumeMounts:
+        - mountPath: /var/lib/virtlet
+          name: virtlet
+        - mountPath: /var/lib/libvirt
+          name: libvirt
+        - name: vms-log
+          mountPath: /var/log/vms
+        - name: dev
+          mountPath: /dev
+      volumes:
+      # /dev is needed for host raw device access
+      - hostPath:
+          path: /dev
+        name: dev
+      - hostPath:
+          path: /sys/fs/cgroup
+        name: cgroup
+      - hostPath:
+          path: /lib/modules
+        name: modules
+      - hostPath:
+          path: /boot
+        name: boot
+      - hostPath:
+          path: /run
+        name: run
+      # TODO: don't hardcode docker socket location here
+      # This will require CRI proxy installation to run
+      # in host mount namespace.
+      - hostPath:
+          path: /var/run/docker.sock
+        name: dockersock
+      - hostPath:
+          path: /var/lib/virtlet
+        name: virtlet
+      - hostPath:
+          path: /var/lib/libvirt
+        name: libvirt
+      - hostPath:
+          path: /etc/cni
+        name: cniconf
+      - hostPath:
+          path: /opt/cni/bin
+        name: cnibin
+      - hostPath:
+          path: /var/lib/cni
+        name: cnidata
+      - hostPath:
+          path: /opt/criproxy/bin
+        name: criproxybin
+      - hostPath:
+          path: /etc/criproxy
+        name: criproxyconf
+      - hostPath:
+          path: /var/log
+        name: log
+      - hostPath:
+          path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
+        name: k8s-flexvolume-plugins-dir
+      - hostPath:
+          path: /var/lib/kubelet/pods
+        name: k8s-pods-dir
+      - hostPath:
+          path: /var/lib
+        name: var-lib
+      - hostPath:
+          path: /var/log/virtlet/vms
+        name: vms-log
+      - hostPath:
+          path: /var/run/libvirt
+        name: libvirt-sockets
+      - hostPath:
+          path: /var/log/pods
+        name: pods-log
+      - configMap:
+          name: virtlet-image-translations
+        name: image-name-translations
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: virtlet
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: virtlet
+subjects:
+- kind: ServiceAccount
+  name: virtlet
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: virtlet
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - create
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: configmap-reader
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: kubelet-node-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: configmap-reader
+subjects:
+- apiGroup: rbac.authorization.k8s.io
+  kind: Group
+  name: system:nodes
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: virtlet-crd
+rules:
+  - apiGroups:
+      - "apiextensions.k8s.io"
+    resources:
+      - customresourcedefinitions
+    verbs:
+      - create
+  - apiGroups:
+      - "virtlet.k8s"
+    resources:
+      - virtletimagemappings
+    verbs:
+      - list
+      - get
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: virtlet-crd
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: virtlet-crd
+subjects:
+- kind: ServiceAccount
+  name: virtlet
+  namespace: kube-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: virtlet
+  namespace: kube-system
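
After the DaemonSet is applied, its pods carry the runtime=virtlet label defined above, so a sanity check (standard kubectl, not part of this change) looks like:

    kubectl get daemonset virtlet -n kube-system
    kubectl get pods -n kube-system -l runtime=virtlet -o wide    # one virtlet pod per labeled worker
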
diff --git a/src/vagrant/kubeadm_virtlet/examples/virtlet.sh b/src/vagrant/kubeadm_virtlet/examples/virtlet.sh
new file mode 100755 (executable)
index 0000000..68d738d
--- /dev/null
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+kubectl label node worker1 extraRuntime=virtlet
+kubectl label node worker2 extraRuntime=virtlet
+kubectl label node worker3 extraRuntime=virtlet
+kubectl create configmap -n kube-system virtlet-config --from-literal=download_protocol=http --from-literal=image_regexp_translation=1 --from-literal=disable_kvm=y
+kubectl create configmap -n kube-system virtlet-image-translations --from-file images.yaml
+kubectl create -f virtlet-ds.yaml
+kubectl create -f cirros-vm.yaml
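
Note on ordering: the node labels and the two ConfigMaps must exist before virtlet-ds.yaml is applied, since the DaemonSet schedules onto nodes by the extraRuntime label and reads virtlet-config. A quick check afterwards:

    kubectl get nodes -L extraRuntime    # the three workers should show 'virtlet'
    kubectl get pod cirros-vm -o wide    # shows which labeled worker hosts the VM
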
diff --git a/src/vagrant/kubeadm_virtlet/host_setup.sh b/src/vagrant/kubeadm_virtlet/host_setup.sh
new file mode 100644 (file)
index 0000000..003bf2b
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -ex
+
+cat << EOF | sudo tee /etc/hosts
+127.0.0.1    localhost
+10.96.0.10 master
+10.96.0.21 worker1
+10.96.0.22 worker2
+10.96.0.23 worker3
+EOF
+
+curl -s http://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb http://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+sudo apt-get update
+sudo apt-get install -y docker.io
+sudo apt-get install -y --allow-downgrades kubelet=1.7.0-00 kubeadm=1.7.0-00 kubectl=1.7.0-00 kubernetes-cni=0.5.1-00
+sudo rm -rf /var/lib/kubelet
diff --git a/src/vagrant/kubeadm_virtlet/master_setup.sh b/src/vagrant/kubeadm_virtlet/master_setup.sh
new file mode 100644 (file)
index 0000000..5ae2487
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+sudo kubeadm init --apiserver-advertise-address=10.96.0.10  --service-cidr=10.96.0.0/24 --pod-network-cidr=10.244.0.0/16 --token 8c5adc.1cec8dbf339093f0
+sudo cp /etc/kubernetes/admin.conf $HOME/
+sudo chown $(id -u):$(id -g) $HOME/admin.conf
+export KUBECONFIG=$HOME/admin.conf
+echo "export KUBECONFIG=$HOME/admin.conf" >> $HOME/.bash_profile
+kubectl apply -f http://git.io/weave-kube-1.6
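
To confirm the control plane is healthy before the workers join, something like the following can be run on the master (standard kubectl; kube-dns is the DNS add-on shipped with Kubernetes 1.7):

    export KUBECONFIG=$HOME/admin.conf
    kubectl get nodes                    # master turns Ready once the weave network is up
    kubectl get pods -n kube-system      # kube-dns and weave-net pods should reach Running
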
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/criproxy.service
new file mode 100644 (file)
index 0000000..bb2f1de
--- /dev/null
@@ -0,0 +1,11 @@
+[Unit]
+Description=CRI Proxy
+
+[Service]
+ExecStart=/usr/local/bin/criproxy -v 3 -alsologtostderr -connect /var/run/dockershim.sock,virtlet:/run/virtlet.sock -listen /run/criproxy.sock
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+WantedBy=kubelet.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/dockershim.service
new file mode 100644 (file)
index 0000000..c629a4b
--- /dev/null
@@ -0,0 +1,11 @@
+[Unit]
+Description=dockershim for criproxy
+
+[Service]
+ExecStart=/usr/local/bin/dockershim ......
+Restart=always
+StartLimitInterval=0
+RestartSec=10
+
+[Install]
+RequiredBy=criproxy.service
diff --git a/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf b/src/vagrant/kubeadm_virtlet/virtlet/etc/systemd/system/kubelet.service.d/20-criproxy.conf
new file mode 100644 (file)
index 0000000..412a48d
--- /dev/null
@@ -0,0 +1,2 @@
+[Service]
+Environment="KUBELET_EXTRA_ARGS=--container-runtime=remote --container-runtime-endpoint=/run/criproxy.sock --image-service-endpoint=/run/criproxy.sock --enable-controller-attach-detach=false"
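
This drop-in points kubelet at the CRI proxy socket for both the container runtime and the image service. One way to confirm the drop-in is active after a daemon-reload (standard systemd commands, not part of this change):

    sudo systemctl daemon-reload
    systemctl cat kubelet                            # shows the unit plus this 20-criproxy.conf drop-in
    systemctl show kubelet --property=Environment    # should include the KUBELET_EXTRA_ARGS above
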
diff --git a/src/vagrant/kubeadm_virtlet/worker_setup.sh b/src/vagrant/kubeadm_virtlet/worker_setup.sh
new file mode 100644 (file)
index 0000000..bc28dab
--- /dev/null
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+set -ex
+sudo kubeadm join --token 8c5adc.1cec8dbf339093f0 10.96.0.10:6443 || true
+
+sudo docker run --rm openretriever/virtlet tar -c /criproxy | sudo tar -C /usr/local/bin -xv
+sudo ln -s /usr/local/bin/criproxy /usr/local/bin/dockershim
+
+sudo mkdir /etc/criproxy
+sudo touch /etc/criproxy/node.conf
+sudo cp -r /vagrant/virtlet/etc/systemd/system/* /etc/systemd/system/
+sudo systemctl stop kubelet
+sudo systemctl daemon-reload
+sudo systemctl enable criproxy dockershim
+sudo systemctl start criproxy dockershim
+sudo systemctl daemon-reload
+sudo systemctl start kubelet
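
Once kubelet is restarted against the proxy socket, a rough verification sketch for a worker (and the master) might look like:

    sudo systemctl status criproxy dockershim kubelet    # all three units should be active
    sudo journalctl -u criproxy -n 50 --no-pager         # recent proxy logs (-v 3 per criproxy.service)
    # and on the master: kubectl get nodes               # workers should register and go Ready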