summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKubernetes Prow Robot <k8s-ci-robot@users.noreply.github.com>2019-07-16 06:43:25 -0700
committerGitHub <noreply@github.com>2019-07-16 06:43:25 -0700
commit452f681d92d98d6d62dfb24fbc9c8da10935632c (patch)
tree1aa15078cdc769555d3a6f8f4081ac0472ec7c06
parentd7ce4319d9eb7ed4f37db627ab17556e2b9f1667 (diff)
parent2f0ce0fe6ba9379f9eb1a3cd1159e67c06ae586d (diff)
Merge pull request #3810 from snowplayfire/kubemark-outside-gce
A guide to set up a kubemark cluster
-rw-r--r--contributors/devel/sig-scalability/hollow-node_simplified_template.yaml93
-rw-r--r--contributors/devel/sig-scalability/kubemark-setup-guide.md75
2 files changed, 168 insertions, 0 deletions
diff --git a/contributors/devel/sig-scalability/hollow-node_simplified_template.yaml b/contributors/devel/sig-scalability/hollow-node_simplified_template.yaml
new file mode 100644
index 00000000..8d33982e
--- /dev/null
+++ b/contributors/devel/sig-scalability/hollow-node_simplified_template.yaml
@@ -0,0 +1,93 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: hollow-node
+ namespace: kubemark
+spec:
+ replicas: {{numreplicas}}
+ selector:
+ name: hollow-node
+ template:
+ metadata:
+ labels:
+ name: hollow-node
+ spec:
+ initContainers:
+ - name: init-inotify-limit
+ image: docker.io/busybox:latest
+ command: ['sysctl', '-w', 'fs.inotify.max_user_instances=200']
+ securityContext:
+ privileged: true
+ volumes:
+ - name: kubeconfig-volume
+ secret:
+ secretName: kubeconfig
+ - name: logs-volume
+ hostPath:
+ path: /var/log
+ containers:
+ - name: hollow-kubelet
+ image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
+ ports:
+ - containerPort: 4194
+ - containerPort: 10250
+ - containerPort: 10255
+ env:
+ - name: CONTENT_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: node-configmap
+ key: content.type
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ command:
+ - /bin/sh
+ - -c
+ - /kubemark --morph=kubelet --name=$(NODE_NAME) --kubeconfig=/kubeconfig/kubelet.kubeconfig $(CONTENT_TYPE) --alsologtostderr --v=2
+ volumeMounts:
+ - name: kubeconfig-volume
+ mountPath: /kubeconfig
+ readOnly: true
+ - name: logs-volume
+ mountPath: /var/log
+ resources:
+ requests:
+ cpu: 20m
+ memory: 50M
+ securityContext:
+ privileged: true
+ - name: hollow-proxy
+ image: {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
+ env:
+ - name: CONTENT_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: node-configmap
+ key: content.type
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ command:
+ - /bin/sh
+ - -c
+ - /kubemark --morph=proxy --name=$(NODE_NAME) --use-real-proxier=false --kubeconfig=/kubeconfig/kubeproxy.kubeconfig $(CONTENT_TYPE) --alsologtostderr --v=2
+ volumeMounts:
+ - name: kubeconfig-volume
+ mountPath: /kubeconfig
+ readOnly: true
+ - name: logs-volume
+ mountPath: /var/log
+ resources:
+ requests:
+ cpu: 20m
+ memory: 50M
+ tolerations:
+ - effect: NoExecute
+ key: node.kubernetes.io/unreachable
+ operator: Exists
+ - effect: NoExecute
+ key: node.kubernetes.io/not-ready
+ operator: Exists
diff --git a/contributors/devel/sig-scalability/kubemark-setup-guide.md b/contributors/devel/sig-scalability/kubemark-setup-guide.md
new file mode 100644
index 00000000..f2f5a81f
--- /dev/null
+++ b/contributors/devel/sig-scalability/kubemark-setup-guide.md
@@ -0,0 +1,75 @@
+## Introduction
+This document describes how to set up a kubemark cluster, given that a base cluster (to run hollow-node pods) and a separate master (to act as master for the hollow nodes) are already present.
+
+## Precondition
+You need a kubemark master and an external cluster to set up a kubemark cluster.
+
+The functions are as follows:
+
+- kubemark master: can be standalone or HA; acts as the kubemark cluster's master
+- external cluster: used to create hollow nodes for the kubemark cluster
+
+## Steps:
+1. Build kubemark image
+
+If you want to build/use your own kubemark image, do as follows. Otherwise skip this section and just use the latest image `staging-k8s.gcr.io/kubemark:latest` from public repo.
+
+- i. pull kubernetes code
+
+```
+cd $GOPATH/src/k8s.io/
+git clone git@github.com:kubernetes/kubernetes.git
+```
+
+- ii. build kubemark binary
+
+```
+./hack/build-go.sh cmd/kubemark/
+cp $GOPATH/src/k8s.io/kubernetes/_output/bin/kubemark $GOPATH/src/k8s.io/kubernetes/cluster/images/kubemark/
+```
+
+- iii. build kubemark image
+
+```
+cd $GOPATH/src/k8s.io/kubernetes/cluster/images/kubemark/
+make build
+```
+
+Then you can get the image named `staging-k8s.gcr.io/kubemark:latest` locally.
+
+- iv. push kubemark image
+
+```
+docker tag staging-k8s.gcr.io/kubemark:latest {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
+docker push {{kubemark_image_registry}}/kubemark:{{kubemark_image_tag}}
+```
+
+2. Create hollow nodes
+
+- i. create namespace, configmap and secret
+
+Copy the kubemark master's kubeconfig (which is used to configure access), put it on a master node of the external cluster, and rename it to `config`.
+
+```
+kubectl create ns kubemark
+kubectl create configmap node-configmap -n kubemark --from-literal=content.type="test-cluster"
+kubectl create secret generic kubeconfig --type=Opaque --namespace=kubemark --from-file=kubelet.kubeconfig=config --from-file=kubeproxy.kubeconfig=config
+```
+
+- ii. apply yaml to create hollow nodes
+
+You can use `hollow-node_simplified_template.yaml` in the current directory, or use `hollow-node_template.yaml` in `kubernetes/test/kubemark/resources/`.
+
+Note:
+
+- the parameter `{{numreplicas}}` means the number of hollow nodes in the kubemark cluster
+- the parameters `{{numreplicas}}`, `{{kubemark_image_registry}}` and `{{kubemark_image_tag}}` need to be filled in the simplified template
+- your external cluster should have enough resources to run `{{numreplicas}}` hollow-node pods
+
+```
+kubectl create -f hollow-node_simplified_template.yaml
+```
+
+Wait for these hollow-node pods to be running. Then you can see these pods registered as nodes of the kubemark master.
+
+At this point, the kubemark master and the external cluster together make up the kubemark cluster.