summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorjdumars <jdumars@gmail.com>2018-08-11 12:36:56 -0700
committerjdumars <jdumars@gmail.com>2018-08-11 12:36:56 -0700
commitcec8997e80854fc9359331826fcb0a967ffd1f0a (patch)
treeb0a581c201d324fce3424e27b2534bc1c707d14f
parent1f259dce2c74c4d8e98cbb5b732018730c033ddd (diff)
parentb33dca0b5bffcc9513cb5f4cb00a05b1812c4b94 (diff)
Merge branch 'master' of https://github.com/kubernetes/community into apireviews
-rw-r--r--OWNERS_ALIASES8
-rw-r--r--committee-code-of-conduct/OWNERS6
-rw-r--r--committee-code-of-conduct/README.md14
-rw-r--r--committee-code-of-conduct/bootstrapping-process.md6
-rw-r--r--committee-steering/governance/sig-charter-template.md6
-rw-r--r--communication/K8sYoutubeCollaboration.md2
-rw-r--r--communication/meeting-notes-archive/q1-2_2018_community_meeting_minutes.md2329
-rw-r--r--communication/moderation.md2
-rw-r--r--communication/moderators.md62
-rw-r--r--communication/slack-guidelines.md8
-rw-r--r--communication/zoom-guidelines.md3
-rw-r--r--contributors/design-proposals/api-machinery/customresource-conversion-webhook.md859
-rw-r--r--contributors/design-proposals/storage/attacher-detacher-refactor-for-local-storage.md281
-rw-r--r--contributors/design-proposals/storage/volume-topology-scheduling.md88
-rw-r--r--contributors/devel/conformance-tests.md1
-rw-r--r--contributors/devel/cri-testing-policy.md12
-rw-r--r--contributors/guide/README.md132
-rw-r--r--contributors/guide/owners.md2
-rw-r--r--contributors/guide/pull-requests.md6
-rw-r--r--events/community-meeting.md22
-rw-r--r--generator/sig_readme.tmpl6
-rw-r--r--github-management/OWNERS6
-rw-r--r--github-management/README.md25
-rw-r--r--github-management/kubernetes-repositories.md84
-rw-r--r--github-management/opening-a-request.md33
-rw-r--r--github-management/org-owners-guide.md24
-rw-r--r--github-management/permissions.md4
-rw-r--r--keps/NEXT_KEP_NUMBER2
-rw-r--r--keps/sig-auth/0014-dynamic-audit-configuration.md109
-rw-r--r--keps/sig-azure/0018-20180711-azure-availability-zones.md (renamed from sig-azure/0018-20180711-azure-availability-zones.md)0
-rw-r--r--keps/sig-cloud-provider/0018-testgrid-conformance-e2e.md (renamed from keps/sig-cloud-provider/0003-testgrid-conformance-e2e.md)2
-rw-r--r--keps/sig-cloud-provider/0019-cloud-provider-documentation.md145
-rw-r--r--keps/sig-cloud-provider/providers/0004-cloud-provider-template.md15
-rw-r--r--keps/sig-cloud-provider/providers/0020-cloud-provider-alibaba-cloud.md120
-rw-r--r--keps/sig-cloud-provider/providers/0021-cloud-provider-digitalocean.md (renamed from keps/sig-cloud-provider/providers/0017-cloud-provider-digitalocean.md)18
-rw-r--r--keps/sig-cloud-provider/providers/0022-cloud-provider-baiducloud.md (renamed from keps/sig-cloud-provider/providers/cloud-provider-baiducloud.md)35
-rw-r--r--keps/sig-cluster-lifecycle/0008-kubeadm-config-versioning.md (renamed from keps/sig-cluster-lifecycle/0008-20180504-kubeadm-config-beta.md)4
-rw-r--r--keps/sig-cluster-lifecycle/0023-kubeadm-config-v1beta1.md244
-rw-r--r--keps/sig-cluster-lifecycle/0023-kubeadm-init.pngbin0 -> 234721 bytes
-rw-r--r--keps/sig-cluster-lifecycle/0023-kubeadm-join.pngbin0 -> 215009 bytes
-rw-r--r--keps/sig-cluster-lifecycle/0023-kubeadm-reset.pngbin0 -> 118679 bytes
-rw-r--r--keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-apply.pngbin0 -> 328364 bytes
-rw-r--r--keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-node.pngbin0 -> 78744 bytes
-rw-r--r--keps/sig-contributor-experience/0007-20180403-community-forum.md2
-rw-r--r--keps/sig-node/0014-runtime-class.md397
-rw-r--r--sig-api-machinery/README.md18
-rw-r--r--sig-apps/README.md18
-rw-r--r--sig-architecture/README.md30
-rw-r--r--sig-auth/README.md20
-rw-r--r--sig-auth/charter.md69
-rw-r--r--sig-autoscaling/README.md18
-rw-r--r--sig-aws/README.md6
-rw-r--r--sig-azure/README.md13
-rw-r--r--sig-big-data/README.md18
-rw-r--r--sig-cli/README.md20
-rw-r--r--sig-cloud-provider/README.md20
-rw-r--r--sig-cluster-lifecycle/README.md28
-rw-r--r--sig-contributor-experience/README.md20
-rw-r--r--sig-docs/README.md19
-rw-r--r--sig-gcp/README.md24
-rw-r--r--sig-ibmcloud/OWNERS6
-rw-r--r--sig-ibmcloud/README.md6
-rw-r--r--sig-instrumentation/README.md18
-rw-r--r--sig-list.md6
-rw-r--r--sig-multicluster/README.md20
-rw-r--r--sig-network/README.md18
-rw-r--r--sig-node/README.md16
-rw-r--r--sig-openstack/README.md18
-rw-r--r--sig-release/README.md22
-rw-r--r--sig-scalability/README.md18
-rw-r--r--sig-scheduling/README.md18
-rw-r--r--sig-service-catalog/README.md22
-rw-r--r--sig-service-catalog/charter.md170
-rw-r--r--sig-storage/README.md18
-rw-r--r--sig-testing/README.md20
-rw-r--r--sig-vmware/README.md29
-rw-r--r--sig-windows/README.md10
-rw-r--r--sigs.yaml72
78 files changed, 5538 insertions, 434 deletions
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index a0de52e6..11db0730 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -24,7 +24,6 @@ aliases:
- justaugustus
- shubheksha
- khenidak
- - colemickens
sig-big-data-leads:
- foxish
- erikerlandson
@@ -38,7 +37,6 @@ aliases:
- hogepodge
- jagosan
sig-cluster-lifecycle-leads:
- - lukemarsden
- roberthbailey
- luxas
- timothysc
@@ -161,4 +159,10 @@ aliases:
- spiffxp
- thockin
- timothysc
+ code-of-conduct-committee:
+ - jdumars
+ - parispittman
+ - eparis
+ - carolynvs
+ - bradamant3
## END CUSTOM CONTENT
diff --git a/committee-code-of-conduct/OWNERS b/committee-code-of-conduct/OWNERS
new file mode 100644
index 00000000..ac10e286
--- /dev/null
+++ b/committee-code-of-conduct/OWNERS
@@ -0,0 +1,6 @@
+reviewers:
+ - code-of-conduct-committee
+approvers:
+ - code-of-conduct-committee
+labels:
+ - committee/conduct
diff --git a/committee-code-of-conduct/README.md b/committee-code-of-conduct/README.md
new file mode 100644
index 00000000..6d602762
--- /dev/null
+++ b/committee-code-of-conduct/README.md
@@ -0,0 +1,14 @@
+# Kubernetes Code of Conduct Committee (CoCC)
+
+The Kubernetes Code of Conduct Committee (CoCC) is the body that is responsible for enforcing and maintaining the Kubernetes Code of Conduct.
+
+The members and their terms are as follows:
+- Jaice Singer Dumars (Google) - 2 years
+- Paris Pittman (Google) - 2 years
+- Carolyn Van Slyck (Microsoft) - 1 year
+- Eric Paris (Red Hat) - 1 year
+- Jennifer Rondeau (Heptio) - 1 year
+
+Please see the [bootstrapping document](./bootstrapping-process.md) for more information on how members are picked, their responsibilities, and how the committee will initially function.
+
+_More information on how to contact this committee and learn about its process to come in the near future. For now, any Code of Conduct or Code of Conduct Committee concerns can be directed to steering-private@kubernetes.io_
diff --git a/committee-code-of-conduct/bootstrapping-process.md b/committee-code-of-conduct/bootstrapping-process.md
index 15083ffa..a98ef9fb 100644
--- a/committee-code-of-conduct/bootstrapping-process.md
+++ b/committee-code-of-conduct/bootstrapping-process.md
@@ -5,7 +5,7 @@ This document (created by the Kubernetes Steering Committee) outlines what the C
## Objectives of the CoCC
* Maintain the Code of Conduct (CoC) document and iterate as needed.
* All CoC revisions must be approved by the Steering Committee.
- * Currently, we use the CNCF CoC. Any adendums or changes based off learnings in the Kubernetes community should be owned by this body of people, the Kubernetes Code of Conduct Committee (CoCC).
+ * Currently, we use the CNCF CoC. Any addendums or changes based off learnings in the Kubernetes community should be owned by this body of people, the Kubernetes Code of Conduct Committee (CoCC).
* Determine and make transparent how CoC issues and incidents are reported and handled in the community.
* Enforce the Code of Conduct within the Kubernetes community.
* Discuss with parties affected
@@ -16,13 +16,13 @@ This document (created by the Kubernetes Steering Committee) outlines what the C
## Formation of the Code of Conduct Committee (CoCC):
* The CoCC consists of 5 members. In the first election, the top 3 voted people will be appointed 2 year terms and the other 2 members will be appointed for a 1 year term.
* CoCC members appointed for a 1 year term may be elected again the following year.
- * The Steering Committee and SIG Chairs are eligble to nominate people for the Code of Conduct Committee (CoCC). _This may change during the next election._
+ * The Steering Committee and SIG Chairs are eligible to nominate people for the Code of Conduct Committee (CoCC). _This may change during the next election._
* The Steering Committee votes on nominees for the CoCC
* Characteristics and Guidance for nominating people for the CoCC:
* Do not have to be part of the Kubernetes or CNCF community
* Previous experience on an Ethics Committee or Code of Conduct Committee is appreciated
* Has demonstrated integrity, professionalism, and positive influence within the community
- * Experience with the tools which we use to communicate (Zoom, Slack, GitHub, etc.) within the Kubernbetes community is appreciated
+ * Experience with the tools which we use to communicate (Zoom, Slack, GitHub, etc.) within the Kubernetes community is appreciated
* Is generally a responsible human
* The members of the Code of Conduct Committee (CoCC) will be public so the people who report CoC issues know exactly who they are working with.
diff --git a/committee-steering/governance/sig-charter-template.md b/committee-steering/governance/sig-charter-template.md
index 77dc777e..7fda2567 100644
--- a/committee-steering/governance/sig-charter-template.md
+++ b/committee-steering/governance/sig-charter-template.md
@@ -11,8 +11,6 @@ necessarily all of the internals.
### In scope
-Link to SIG section in [sigs.yaml]
-
#### Code, Binaries and Services
- list of what qualifies a piece of code, binary or service
@@ -20,7 +18,7 @@ Link to SIG section in [sigs.yaml]
- e.g. *clis for working with Kubernetes APIs*,
- *CI for kubernetes repos*, etc
- **This is NOT** a list of specific code locations,
-- or projects those go in [sigs.yaml]
+- or projects those go in [SIG Subprojects][sig-subprojects]
#### Cross-cutting and Externally Facing Processes
@@ -62,5 +60,5 @@ Pick one:
2. Federation of Subprojects
[sig-governance]: https://github.com/kubernetes/community/blob/master/committee-steering/governance/sig-governance.md
-[sigs.yaml]: https://github.com/kubernetes/community/blob/master/sigs.yaml#L1454
+[sig-subprojects]: https://github.com/kubernetes/community/blob/master/sig-YOURSIG/README.md#subprojects
[Kubernetes Charter README]: https://github.com/kubernetes/community/blob/master/committee-steering/governance/README.md
diff --git a/communication/K8sYoutubeCollaboration.md b/communication/K8sYoutubeCollaboration.md
index 82bda910..eec69b96 100644
--- a/communication/K8sYoutubeCollaboration.md
+++ b/communication/K8sYoutubeCollaboration.md
@@ -40,4 +40,4 @@ Collaboration should simplify things for everyone, but with privilege comes resp
Your community managers are happy to help with any questions that you may have and will do their best to help if anything goes wrong. Please get in touch via [SIG Contributor Experience](https://git.kubernetes.io/community/sig-contributor-experience).
-
+- Check the [centralized list of administrators](./moderators.md) for contact information.
diff --git a/communication/meeting-notes-archive/q1-2_2018_community_meeting_minutes.md b/communication/meeting-notes-archive/q1-2_2018_community_meeting_minutes.md
new file mode 100644
index 00000000..f1a2dad4
--- /dev/null
+++ b/communication/meeting-notes-archive/q1-2_2018_community_meeting_minutes.md
@@ -0,0 +1,2329 @@
+**This is an archive document**
+
+
+## July 26, 2018 ([recording link](https://youtu.be/XPt3ZwZe-VQ))
+
+
+
+* **Moderators**: Chris Short [ContribEx]
+* **Note Taker**: Solly Ross, Josh Berkus
+* **Demo:** EKS - Bryce Carman - [Amazon EKS] (confirmed)
+ * Managed Kubernetes on [https://aws.amazon.com/eks/](https://aws.amazon.com/eks/)
+ * Provisioning
+ * Control plane is hosted/managed by EKS, worker nodes are under control of users
+ * No outside communication with the control plane besides via the load balancer in front of the API server
+ * Can use security groups to limit control-plane-worker-node interaction
+ * Can set role used to create various AWS resources (like loadbalancers) so that you don't have to give EKS full permissions in your account
+ * Can just use VPC and subnets already present in account
+ * Networking
+ * CNI plugin
+ * Usines IP addresses from VPC that the nodes are already part of (integrated with AWS networking)
+ * No overlay network
+ * Can integrate with Calico network policy as well
+ * Designed to isolate control planes from nodes as well
+ * Interaction
+ * Using Heptio authenticator and 1.10 for external authentication for kubectl in order to authenticate against AWS IAM
+ * Just uses the same creds as the AWS CLI -- no separate auth to manage
+ * Demo'd using Helm to create a wordpress site
+ * Questions
+ * Can users scale control plane?
+ * No
+* **Release Updates**
+ * 1.12 - Tim Pepper - Confirmed
+ * **Feature Freeze Tuesday July 31** - next week
+ * [see email on k-dev for more info](https://groups.google.com/d/topic/kubernetes-dev/T-kIHtgS5J4/discussion)
+ * After Tuesday features not captured by the release team must go through the **[exception process](https://github.com/kubernetes/features/blob/master/EXCEPTIONS.md)**.
+ * SIGs should be thinking about their release themes (major work focuses) for the 1.12 release, insuring those are represented in feature issues and have plans for documentation and test coverage.
+ * Not code freeze (that comes later)
+ * 1.11.x - Anirudh Ramamathan - Confirmed
+ * Nothing to report
+* **KEP o' the Week **- KEP 17 - Jordan Liggitt - Confirmed
+ * [KEP 17 - Moving ComponentConfig API types to staging repos](https://github.com/kubernetes/community/blob/master/keps/sig-cluster-lifecycle/0014-20180707-componentconfig-api-types-to-staging.md)
+ * Taking config for core kube components from loose flags to structured config
+ * Kubelet currently has a config file format that's in beta
+ * Makes it easier to look at exactly how a particular component is configured, warn about deprecated config, missing config, etc
+ * Want to put configuration types in separate repo
+ * Tools like kubeadm should be able to import config to manipulate and generate, without pulling in all of Kubernetes
+ * Want to make sure common configuration aspects can be shared, referenced, and reused
+ * client connection info
+ * Leader election
+ * etc
+ * Look over if you are involved in developing the Kube components, or have tooling that sets up the various components
+* **SIG Updates**
+ * Auth - Jordan Liggitt - Confirmed
+ * [https://docs.google.com/presentation/d/1MAIypro-bcLC7wNEnIazYqmCL6ILBN69uUWIBw7QBIY/edit?usp=sharing](https://docs.google.com/presentation/d/1MAIypro-bcLC7wNEnIazYqmCL6ILBN69uUWIBw7QBIY/edit?usp=sharing)
+ * Usability
+ * Multiple Authorizers (e.g. GKE)
+ * Now honor superuser permissions from other authorizers, so if you're a superuser, you can create policy without first explicitly granting yourself those permissions
+ * Now show the error message from all authorizers, instead of just the error from the first authorizer
+ * Show a much cleaner, more succinct and readable error message for failures due to escalations
+ * Features
+ * Kubelet Certs
+ * Better support for delegating to an external credentials providers (e.g. AWS IAM)
+ * Requesting and rotating certs with the CSR API (still requires external approval process for the CSRs)
+ * Scoped service account tokens
+ * Moving towards beta for time-limited and audience-scoped tokens
+ * Audit improvements
+ * Heading towards v1 audit event API
+ * Work ongoing on dynamic audit webhook reg
+ * Instrumentation - Frederic Brancyzk - Confirmed
+ * Heapster deprecation ([https://github.com/kubernetes/heapster/blob/master/docs/deprecation.md](https://github.com/kubernetes/heapster/blob/master/docs/deprecation.md))
+ * Setup removal in 1.12, completely removed as of 1.13
+ * Node metrics work still ongoing, in collaboration with SIG Node
+ * Improve monitoring story around node monitoring
+ * Chime in if you maintain a device plugin or node component
+ * Metrics-server rework ([https://github.com/kubernetes-incubator/metrics-server/pull/65](https://github.com/kubernetes-incubator/metrics-server/pull/65))
+ * call for testing in non-production servers, should make things more stable, has several fixes to communication with nodes
+ * k8s-prometheus-adapter advance configuration merged
+ * Allows more precisely controlling how metrics in the custom metrics API map to Prometheus queries, and how metrics show up in the custom metrics API
+ * A number of third party service involving e2e tests have been put behind a feature flag in the test infrastructure
+ * should improve flaking tests from sig-instrumentation, especially around components that we can't control
+* **Announcements**
+ * **Shoutouts **(mention people on #shoutouts on Slack)
+ * Manjunath Kumatagi for patiently working through issues that will help us run conformance tests on other architectures (say `arm64`). It's taken a really long time to get this far and the end is in sight. Thanks for your hard work across multiple repos and sigs.
+ * Jordan Liggitt for always knowing the answer to ... everything ... and being so available to answer questions. You're an incredible resource and I'm always grateful to lean on you when I need to!
+ * Quinton Hoole for including a "how you can contribute" slide in the SIG Multicluster update in today's community! Way to model SIG leadership in growing the k8s team by facilitating new/increased participation!
+
+
+## July 19, 2018 ([recording link](https://youtu.be/XNLDZYMphuU))
+
+
+
+* **Moderators**: Tim Pepper [ContribEx, Release, VMware]
+* **Note Taker**: Solly Ross
+* **Demo:** Microk8s - [Marco Ceppi](mailto:marco@ceppi.net) (confirmed)
+ * [https://microk8s.io](https://microk8s.io) / #microk8s on Slack / [https://github.com/juju-solutions/microk8s](https://github.com/juju-solutions/microk8s)
+ * Lightweight kubernetes cluster install
+ * Installed, uninstalled with snaps
+ * works across different linux distros, other OSes coming eventually)
+ * Still a bit in beta
+ * Different releases installed with different channels (beta channel is 1.11.0, edge is 1.11.1)
+ * Commands installed namespaced by `microk8s.`
+ * kubectl is `microk8s.kubectl`
+ * Can enable different addons like dns and dashboard with `microk8s.enable`
+ * Cert generation, ingress, storage also available
+ * kubeconfig is scoped just to microk8s.kubectl, doesn't interfere with normal kubectl
+ * `microk8s.reset` resets to blank state
+ * Kubernetes run as systemd services
+ * Service Cluster IP addresses available as normal on host system
+* [ 0:09 ]** Release Updates**
+ * 1.12 [Tim Pepper]
+ * Features collection underway
+ * [https://github.com/kubernetes/kubernetes/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.12](https://github.com/kubernetes/kubernetes/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.12)
+ * [https://github.com/kubernetes/features/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.12](https://github.com/kubernetes/features/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.12)
+ * **Feature freeze is July 31**
+ * Make sure your SIG features are up to date!** **
+ * 1.11.1 [Anirudh Ramanathan]
+ * After fixing initial release issues (pushing images, cherry picks), release is now out!
+ * [https://groups.google.com/forum/#!topic/kubernetes-announce/tMTjihgETUo](https://groups.google.com/forum/#!topic/kubernetes-announce/tMTjihgETUo)
+ * Issues encountered
+ * [https://github.com/kubernetes/release/issues/586](https://github.com/kubernetes/release/issues/586)
+ * Push permissions for cloud builder to gcr.io/k8s-image-staging (new staging bucket)
+ * [https://github.com/kubernetes/release/issues/587](https://github.com/kubernetes/release/issues/587)
+ * Autopush to release gcr bucket - Ben Elder
+ * Debs/RPMs
+ * Still needs a googler
+ * First non-Google release folks, updating docs to work around some last wrinkles
+* [ 0:13 ] **KEP o' the Week **- none this week
+ * If you want to get a broader audience for an up-and-coming KEP, you can get it discussed here!
+* [ 0:14 ] **SIG Updates**
+ * SIG Big Data (Anirudh Ramanathan, Yinan Li, confirmed)
+ * Deal with big data workloads on Kube
+ * Specifically: Spark, Spark Operator, Apache Airflow, HDFS
+ * Code freeze for Spark coming up, so lots of work there
+ * python support, client node support for things like Jupyter notebooks talking to Spark on Kubernetes)
+ * Stability fixes - better controller logic
+ * Making sure to be level triggered and not edge triggers
+ * Removing some hacks with init containers
+ * Spark ([link](https://issues.apache.org/jira/issues/?jql=project+%3D+SPARK+AND+component+%3D+Kubernetes))
+ * Working towards 2.4 release.
+ * 2.4 code freeze and branch cut on 8/1
+ * Major features
+ * PySpark support
+ * Client mode - support for notebooks
+ * Lots of testing, merged integration tests
+ * Removal of things like init-containers (getting us closer to GA)
+ * Stability fixes - controller logic
+ * Improvements on client side
+ * Future work
+ * Customize pod templates
+ * Dynamic allocation/elasticity
+ * HA driver - might need help from sig-apps to make it work
+ * SparkR and Kerberized HDFS support
+ * Spark Operator new features ([link](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator))
+ * Mutating admission webhook to replace initializer used before
+ * Python support
+ * HDFS support ([link](https://github.com/apache-spark-on-k8s/kubernetes-HDFS))
+ * Assessing demand, making progress
+ * Chart exists in link above
+ * Airflow ([link](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=71013666))
+ * Blog post went live - 28 June 2018
+ * [https://kubernetes.io/blog/2018/06/28/airflow-on-kubernetes-part-1-a-different-kind-of-operator/](https://kubernetes.io/blog/2018/06/28/airflow-on-kubernetes-part-1-a-different-kind-of-operator/)
+ * SIG Multicluster (Quinton Hoole, confirmed)
+ * [Slides](https://docs.google.com/presentation/d/1vcMLWEMRvg1rSrB1Ha-koxRZ9h1MUESW9q-c_cDP5n0/edit#slide=id.gc6f73a04f_0_0)
+ * Goals
+ * Solving common challenges releated to managing multiple clusters
+ * Applications that run across multiple clusters
+ * Subprojects
+ * Cluster Federation (v2) [[https://github.com/kubernetes-sigs/federation-v2](https://github.com/kubernetes-sigs/federation-v2) ]
+ * Work across different clusters, same or different cloud provider
+ * V1 was a POC, won't be developed further
+ * V2 focuses on decoupled, reusable components
+ * V2 has feature parity with v1, is alpha
+ * Highlights
+ * CRDs for control planes, installed in existing cluster
+ * Generic impl for all kube types (including CRDs) for propagating any types into all clusters, with basic per-cluster customization
+ * Several higher-level controllers, for example:
+ * migration of RS and deployments between clusters
+ * Managing federated DNS
+ * Management of Jobs
+ * Management of HPA to manage global limits
+ * Uses cluster registry
+ * Next steps
+ * Federated status
+ * Federated read access (e.g. view all pods across all clusters)
+ * affinity/anti-affinity for bunches objects or namespaces to a particular cluster
+ * RBAC enforcement
+ * Please comment if you have suggestions for API, before moves to beta
+ * Contributions to code also welcome
+ * Cluster Registry [[https://github.com/kubernetes/cluster-registry](https://github.com/kubernetes/cluster-registry) ]
+ * Fairly stable and complete
+ * Multicluster Ingress [[https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress](https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress) ]
+ * Look at repo for more information
+ * Questions
+ * Cluster registry vs cluster API?
+ * Cluster API is to create clusters, cluster registry is for using already-existing clusters
+ * Maybe could disambiguate the terms better, manage overlap
+ * SIG Scheduling (Bobby Salamat, confirmed)
+ * 1.11 Update
+ * Pod Priority and Preemption to beta, available by default
+ * Improved the feature, restricted a bit to avoid allowing untrusted users to create high-prio pods, only allow super-high-priority pods in kube-system namespace
+ * DaemonSet scheduling in default scheduler (alpha)
+ * 1.12 Update
+ * Focus on performance
+ * Improved equivalence cache (pod with similar spec probably fits on same node unless the node has changed)
+ * 3x performance improvement now
+ * Helps with scheduling large replica sets, etc
+ * Working on proposal for gang scheduling [link here]
+ * Proposal for scheduling framework, direction might change a bit [link here]
+ * Moving to beta
+ * Taint by condition, taint-based eviction
+ * Equivalence cache
+ * DaemonSet scheduling in default scheduler
+ * Want to graduate descheduler out of incubator
+* **Announcements**
+ * **Shoutouts **(mention people on #shoutouts on Slack)
+ * Jeremy Rickard: Shout out to @mbauer for really pushing to make service catalog use prow and to improve out PR reviewing and testing process
+ * Christoph Blecker: Two shoutouts I wanted to get out this week:
+ * First, shoutout to @matthyx who has been very active in k/test-infra recently and has been making a number of different contributions from fixing bugs, to adding new features to our automation. He's been eager to help and has stuck with some of the more complex changes that require many comments and interactions (sig-bikeshed ftw :bikeshed:)
+ * Second, shoutout to @nikhita! I could easily stop right there, as her many contributions to the project really speak for themselves. I want to call out though the little chopping wood and carrying water tasks she does that may not be as obvious.. like ensuring that stale issues are reviewed and either closed or marked as still relevant, or welcoming new contributors with an emoji or two. It's these kinds of things that exemplify what the Kubernetes community is all about.
+ * Benjamin Elder: shoutout to @Quang Huynh for continuing to send k/test-infra fixes and push through to flesh out the PR status page (especially https://github.com/kubernetes/test-infra/pull/8612) long after his internship! :simple_smile: Hopefully we can hopefully start using the Prow PR status page more widely now thanks to all the hard work there :tada:
+ * Aaron Crickenberger: shoutout to @bentheelder for helping push kubernetes v1.11.1 images out (fixing the symptom), and getting the appropriate folks within google involved to ensure there is now a team owning a better solution to the problem (fixing the problem); this is continued progress toward decoupling google.com as a requirement for releases
+ * [Kubernetes wins most impact award at OSCON](https://twitter.com/oscon/status/1019992011858894849)!!! (-paris; Tim to read)
+
+
+## July 12, 2018 ([Recording](https://youtu.be/OBubmJhr8lE))
+
+
+
+* **Moderators**: Paris Pittman [ContribEx, Google]
+* **Note Taker**: Josh Berkus [Release]
+* **Demo:** No demo today - see you next week!
+* [ 0:01 ]** Release Updates**
+ * 1.12: [Tim Pepper]
+ * Release cycle is underway!
+ * Team: [https://git.k8s.io/sig-release/releases/release-1.12/release_team.md](https://na01.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgit.k8s.io%2Fsig-release%2Freleases%2Frelease-1.12%2Frelease_team.md&data=02%7C01%7Ctpepper%40vmware.com%7C89885a0f477d469ddcb308d5e6ba8e88%7Cb39138ca3cee4b4aa4d6cd83d9dd62f0%7C1%7C0%7C636668611291496196&sdata=S%2FDlfHfkgz1G9NkGVMtjDIa%2FY%2BkzpAkKwpOU0HgaGfc%3D&reserved=0)
+ * Schedule: [https://git.k8s.io/sig-release/releases/release-1.12/release-1.12.md](https://na01.safelinks.protection.outlook.com/?url=https%3A%2F%2Fgit.k8s.io%2Fsig-release%2Freleases%2Frelease-1.12%2Frelease-1.12.md&data=02%7C01%7Ctpepper%40vmware.com%7C89885a0f477d469ddcb308d5e6ba8e88%7Cb39138ca3cee4b4aa4d6cd83d9dd62f0%7C1%7C0%7C636668611291506201&sdata=ZzUn8hRsKf3E1poF2i5%2BVgmpm0UYnM3rZh1iiy%2Br1QM%3D&reserved=0)
+ * Features collection is happening now, see:
+ * [https://github.com/kubernetes/features/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.12](https://github.com/kubernetes/features/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.12)
+ * [https://groups.google.com/forum/#!topic/kubernetes-dev/T-kIHtgS5J4/discussion](https://groups.google.com/forum/#!topic/kubernetes-dev/T-kIHtgS5J4/discussion)
+ * Key dates:
+ * Feature freeze: July 31, 2018
+ * Begin code slush: Aug. 28, 2018
+ * Begin code freeze: Sept. 4, 2018
+ * End code freeze: Sept. 19, 2018
+ * Release date: Sept. 25, 2018
+ * Process changes: Nothing notable versus 1.11, will continue shortened code slush/freeze, BUT this depends on us all keeping a clean [CI Signal](http://testgrid.k8s.io/) throughout the release cycle. Additionally asking for increased focus on earlier:
+ * Definition of blocking test cases and test case additions
+ * Drafting documentation for feature changes
+ * 1.11.1: [Anirudh Ramanathan]
+ * Announcement: [https://groups.google.com/forum/#!topic/kubernetes-dev-announce/vdodsSq21qc](https://groups.google.com/forum/#!topic/kubernetes-dev-announce/vdodsSq21qc)
+ * Will cut Monday July 16th.
+ * Freezing branch today EOD
+ * Some cherrypicks still need some action: please check yours.
+ * [https://docs.google.com/document/d/1kFHQsk1iM9rh0iEaLhaNnAtvtU00ZSlZgb1etmdmcTQ/edit](https://docs.google.com/document/d/1kFHQsk1iM9rh0iEaLhaNnAtvtU00ZSlZgb1etmdmcTQ/edit)
+ * If your PR is marked in pink/orange, it might need action on your part.
+ * Kops test was blocking this morning.
+* [ 0:07 ] **KEP o' the Week** (Janet Kuo)
+ * [https://github.com/kubernetes/community/pull/2287](https://github.com/kubernetes/community/pull/2287)
+ * For cleanup of frequently created & dropped objects
+ * We don't have a good way to garbage collect items which no longer have an owner.
+ * Often people update-and-replace instead of modifying
+ * Proposal for new GC for these objects.
+ * Will be discussed in next API-machinery meeting next week (Wednesday) if you care about the KEP
+ * Will give detailed presentation there.
+* [ 0:00 ] **SIG Updates**
+ * SIG API Machinery [David Eads](confirmed)
+ * Link to [slides](https://docs.google.com/presentation/d/171PN2zg5iMXZ18LwYTEUe0Jg_-IxFDSIsbHW09F9mZk/edit#slide=id.g3d7994f0d0_0_0)
+ * Delivered in 1.11:
+ * Improved dynamic client, easier to use for CRD developers. Everyone should switch to this because the old client will eventually go away.
+ * "Null CRD conversion": you can promote a CRD from one version to another, even though there's no API changes. No data transformation, no changes to schema. So very limited for now.
+ * Work on feature-branch for Server-side apply.
+ * Prep work for making controller-manager start from a config
+ * 1.12 work
+ * Server-side apply dry run being merged into Master
+ * Path to more advanced CRD conversion, field defaults, advanced versioning (design phase).
+ * Controller-manager moving to running from config
+ * Generic initializers as alpha. May be superseded by admission webhooks. If you need something in Generic Init that isn't satisfied by webhooks, speak up in their meeting to save it.
+ * SIG Testing [Steve Kuznetsov] (confirmed)
+ * Link to [slides](https://docs.google.com/presentation/d/10v9MoXOjEJQ9opoIffNm6XwEIaUtYpNcCRvBuGaS-zs/edit?usp=sharing)
+ * Implemented caches for test runs, which is a big performance boost
+ * Reduced GH API hits by 1500/hr
+ * Bazel build cache lowered test times
+ * UX improvements for the k8s bot
+ * Now can LTGM and approve in review comment
+ * Robots now validate OWNERS files
+ * Easier administration
+ * Using Peribolos for GH API management
+ * Automated branch protection now on all repos
+ * Only bots can merge to branches
+ * Simpler test Job management: now just needs a container with an entrypoint
+ * Merge workflows using Tide are implemented
+ * Plan to rollout for 1.12
+ * Will include PR status page, yay! Makes it easier to see why your PR is stuck.
+ * Testgrid dashboard for conformance tests
+ * Including openstack
+ * Prow is now being adopted by other orgs
+ * Google, Red Hat, Istio, JetStack …
+ * Future work:
+ * Better onboarding docs
+ * Fix tech debt that makes getting started hard
+ * Better log viewer, esp now that we have scalability presubmits
+ * Clean up config repo
+ * Framework for writing bot interactions
+ * API for cluster provisioning
+ * Questions:
+ * What about archival stats on PR status dashboard?
+ * Will discuss at sig-testing meeting
+ * What about doc on how to write a test?
+ * Also really critical, needs help
+ * SIG ContribEx [Paris](confirmed)
+ * [Link to slides](https://docs.google.com/presentation/d/1z1Cscr-cOpX9b7vUqdRfWvtd6Yb_Ybm16_G_nVXTssI/edit?usp=sharing)
+ * Contributor Guide
+ * Umbrella issue is now closed
+ * non-code guide in development - meets on Weds
+ * Developer Guide
+ * Tim Pepper now taking point on this
+ * Reach out to him @tpepper if you can help
+ * Contributor.kubernetes.io web site is under early design
+ * Different from general community, this one will be just for contributors
+ * More modern calendar
+ * Prototype up, check it out (link from slides)
+ * goal to launch in 90 days
+ * Community Management
+ * All talking all the time, it's time consuming
+ * Contributor summits, first one (run by contribex) in Copenhagen
+ * Rolling out new contributor workshop + playground
+ * Will have smaller summit in Shanghai (contact @jberkus)
+ * Started planning for Seattle, will have an extra ½ day.
+ * Registration will be going through kubecon site
+ * Manage alacarte events at other people's conferences
+ * Communication pipelines & moderation
+ * Clean up spam
+ * Reduce number of pipelines
+ * Some draft moderation guides
+ * Also run the Community Meeting
+ * Zoom has a bad actor problem, so we're not locking down Zoom permissions, trying not to take away public meetings, looking at new security together with Zoom execs.
+ * Moderating k-dev and k-users MLs now
+ * If you need to reach moderators quickly, use slack-admins slack channel
+ * Slack: 40K users, a lot less moderation required
+ * Discuss.kubernetes.io
+ * Been successful for tips & tricks and user advice
+ * Will be "official" RSN
+ * Mentoring
+ * Meet Our Contributors is doing well
+ * Yesterday's special edition had Steering Committee members
+ * Outreachy, only participating twice a year
+ * September deadline for winter intern, planning on 1
+ * Participating companies can pay for more
+ * Group mentoring: the 1:1 hour
+ * If your SIG needs to move people up to approver, please contact @paris
+ * GSoC, being done by API-machinery
+ * DevStats
+ * Github Management proposed subproject
+* Announcements
+ * **Shoutouts** - enter yours in #shoutouts slack channel!
+ * (jberkus) - @jdumars for inventing, then running, really effective retros for releases.
+ * (paris) - shouts to @liggitt @stevekuznetsov and @munnerz for a jam packed, informative, #meet-our-contributors [session yesterday](https://youtu.be/EA6s09YXgh8)! (watch the recording; good info!)
+ * (paris) another shout to @arschles @janetkuo for being mentors on the second great episode of #meet-our-contributors yesterday. (also to bdburns, pwittroc, and philips but I will spare their notifications for doing our first [AMA with steering committee members](https://youtu.be/BuJhzJriaNY))
+ * munnerz - shout out to @stevekuznetsov for immediately jumping to spend time debugging and fixing issues with our Prow deployment and tide (not) merging our PRs! looking forward to finally rolling the fix out :slightly_smiling_face: it has caused us issues for 1-2 months now :smile:
+ * **[Office Hours](https://github.com/kubernetes/community/blob/master/events/office-hours.md) is next Wednesday** - volunteers to help answer user questions are always appreciated, ping @jeefy or @mrbobbytables if you want to help, otherwise help us spread the word!
+ * Next week's meeting won't be streamed, so expect a slight delay on publishing it to YouTube
+
+
+## July 5, 2018
+
+**NO MEETING**
+
+
+## June 28, 2018 - ([recording](https://youtu.be/aTNNtJ56ahE))
+
+
+
+* **Moderators**: Jaice Singer DuMars [SIG PM/Release]
+* **Note Taker**: First Last [Company/SIG]
+* [ 0:00 ]** Demo **- containerd - Phil Estes - estesp@gmail.com
+ * Link to [slides](https://docs.google.com/presentation/d/19ZHjXR1uG4wdW5uXiNB7fda2goRBlSbBaV5Cw06a3zk/edit?usp=sharing)
+ * Link to [repositories](https://github.com/containerd)
+* [ 0:00 ] **Announcements**
+ * SIG IBMCloud, Autoscaling, and GCP will be updating in August
+ * Github Groups [Jorge Castro]
+ * [https://github.com/kubernetes/community/issues/2323](https://github.com/kubernetes/community/issues/2323) working to make current 303 groups in the org easier to manage
+ * Shoutouts this week (Check in #shoutouts on slack)
+ * jberkus: To Jordan Liggitt for diagnosing & fixing the controller performance issue that has haunted us since last August, and to Julia Evans for reporting the original issue.
+ * Maulion: And another to @liggitt for always helping anyone with a auth question in all the channels with kindness
+ * jdumars: @paris - thank you for all of your work helping to keep our community safe and inclusive! I know that you've spent countless hours refining our Zoom usage, documenting, testing, and generally being super proactive on this.
+ * Nikhita: shoutout to @cblecker for excellent meme skills!
+ * Mrbobbytales: Just want to give a big shout out to the whole release team. Thanks for all your effort in getting 1.11 out the door :slightly_smiling_face: Seriously, great job!
+ * Misty: @chenopis for last-minute 1.11 docs-related heroics!
+ * Misty: @nickchase for amazing release notes!
+ * Misty: @jberkus for being a very patient and available release lead as I was on the release team for the first time
+ * Jberkus: @liggitt for last-minute Cherrypick shepherding, and @nickchase for marathon release notes slog
+ * Jberkus: and @misty @AishSundar @tpepper @calebamiles @idvoretskyi @bentheelder @cjwagner @zparnold @justaugustus @Kaitlyn for best release team yet
+ * Tpepper: shoutout to @jberkus for his leadership of our team!
+* [ 0:00 ]** Release Retrospective for 1.11**
+ * [Retro doc](https://docs.google.com/document/d/1Kp9J29wCTY_3SdQn0Kmpuw9lOSxoO7BYWDUcmSoCrZo/edit#)
+ * SIG Release will do deep dive on retrospective details, but today this meeting focused on the high level cross-project topics like:
+ * Release timeline evolution and deadlines
+ * How to better track major features and changes that are in need of docs, test cases, release noting
+ * How do we get user/distributor/vendor testing of betas and rc's. Consumption is harder when docs and kubeadm upgrade path aren't there yet.
+ * Retro Part II (detail retro): Tuesday, July 3rd, 10am, [https://zoom.us/j/405366973](https://zoom.us/j/405366973)
+
+
+## July 15, 2018 - recording
+
+
+
+* [Stackoverflow Top Users](https://stackoverflow.com/tags/kubernetes/topusers) for June 2018, thanks for helping out!
+ * [Matthew L Daniel](https://stackoverflow.com/users/225016/matthew-l-daniel)
+ * [suren](https://stackoverflow.com/users/5564578/suren)
+ * [Janos Lenart](https://stackoverflow.com/users/371954/janos-lenart)
+ * [Nicola Benaglia](https://stackoverflow.com/users/2718151/nicola-benaglia)
+ * [Ignacio Millán](https://stackoverflow.com/users/9811836/ignacio-mill%c3%a1n)
+
+
+## June 21, 2018 - ([recording](https://youtu.be/VmVh2TsRP-s))
+
+
+
+* **Moderators**: Arun Gupta [Amazon / SIG-AWS]
+* **Note Taker**: Chris Short and Jorge Castro [SIG Contrib Ex]
+* [ 0:00 ]** Demo **-- Agones - Dedicated Game Server Hosting and Scaling for Multiplayer Games on Kubernetes [Mark Mandel, markmandel@google.com] (confirmed)
+ * [https://github.com/GoogleCloudPlatform/agones](https://github.com/GoogleCloudPlatform/agones)
+* [ 0:00 ]** Release Updates**
+ * 1.11 [Josh Berkus - Release Lead]
+ * Code Thaw on Tuesday, held changes from Code Freeze have now cleared the queue.
+ * All 1.11 changes now need to be cherrypicked.
+ * RC1 was released yesterday, please test!
+ * Status is currently uncertain. Probability of a release delay is 50%, will make call at Burndown meeting 10am tomorrow.
+ * CI Signal Issues:
+ * GKE appears to have pushed a [change breaking tests](https://github.com/kubernetes/kubernetes/issues/65311) at midnight last night, currently sorting whether that's just a GKE problem.
+ * [Upgrade tests are still very flaky](https://k8s-testgrid.appspot.com/sig-release-master-upgrade), this seems to be an artifact of the tests and not of code. GCE/GKE staff have given the go-ahead to release without clean signal as they will not be fixing the tests.
+ * [Alpha tests failing](https://k8s-testgrid.appspot.com/sig-release-1.11-blocking#gce-alpha-features-1.11) due to [Daemonset issue](https://github.com/kubernetes/kubernetes/issues/65192#issuecomment-398541158); currently trying test resource change.
+ * Release notes collector is still broken, please **check[ the release notes](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release_notes_draft.md) to make sure all of your changes represented**! An estimated 20-30 release notes are missing. Contact (@nickchase / [nchase@mirantis.com](mailto:nchase@mirantis.com)) if you find something missing.
+ * 1.12 [Tim Pepper - Release Lead]
+ * Patch Release Updates
+ * 1.8.14
+ * 1.9.9 release schedule
+ * 1.10.5
+* [ 0:00 ] **KEP o' the Week** (Yisui)
+ * Namespace Population is an automated mechanism to make sure the predefined policy objects (e.g. NetworkPolicy, Role, RoleBinding) are present in selected namespaces.
+ * [https://github.com/kubernetes/community/pull/2177](https://github.com/kubernetes/community/pull/2177)
+* [ 0:00 ] **Open KEPs** [Kubernetes Enhancement Proposals]
+ * [Check out the KEP Tracking Board](https://github.com/kubernetes/community/projects/1)
+* [ 0:00 ] **SIG Updates**
+ * SIG Big Data [Sean Suchter] (confirmed)
+ * Slides: [https://docs.google.com/presentation/d/1GE9wh7Lja1vFHFCJdH45PAjjEVJBmhhXvnrjjq5jpnk/edit#slide=id.g3bb158eee1_0_0](https://docs.google.com/presentation/d/1GE9wh7Lja1vFHFCJdH45PAjjEVJBmhhXvnrjjq5jpnk/edit#slide=id.g3bb158eee1_0_0)
+ * k8s 💗Spark
+ * [Spark on Kubernetes Talk](https://databricks.com/session/apache-spark-on-kubernetes-clusters)
+ * ASF is going to publish official Spark container images
+ * NEED: Parity with Spark on YARN (esp. scheduling)
+ * SIG PM [Stephen Augustus] (confirmed)
+ * Slides: [https://docs.google.com/presentation/d/1p8FF45r0CF-AYnGm_B59F2PN_jybd0bbLe0fb0APlJU/edit?usp=drivesdk](https://docs.google.com/presentation/d/1p8FF45r0CF-AYnGm_B59F2PN_jybd0bbLe0fb0APlJU/edit?usp=drivesdk)
+ * SIG Azure [Stephen Augustus] (confirmed)
+ * Slides: [https://docs.google.com/presentation/d/15M-bQdMxaY6ZBedEEt9yKNesfv_95NYHBHIRwCYAP2E/edit?usp=drivesdk](https://docs.google.com/presentation/d/15M-bQdMxaY6ZBedEEt9yKNesfv_95NYHBHIRwCYAP2E/edit?usp=drivesdk)
+* [ 0:00 ] **Announcements**
+ * Please pin your SIG meeting info and agenda doc in your SIG slack channel. Now that the main calendar is not on [https://kubernetes.io/community/](https://kubernetes.io/community/) meeting info is less discoverable without these links.
+ * **SIG Chairs/TLs - please check your email (sent to k-sig-leads@). New zoom settings and moderation controls. Let's keep our meetings safe and transparent. **
+ * All SIGs - please take time to look at the "help wanted" and "good first issue" labels, available across all Kubernetes repositories. They're meant to highlight opportunities for new contributors. Please ensure that they're being used appropriately (the "good-first-issue" especially has fairly specific requirements for the issue author): [https://github.com/kubernetes/community/blob/master/contributors/devel/help-wanted.md](https://github.com/kubernetes/community/blob/master/contributors/devel/help-wanted.md)
+ * Shoutouts this week (Check in #shoutouts on slack)
+ * Jason DeTiberus: @neolit123 (Lubomir Ivanov) for all of the docs contributions for kubeadm v1.11
+ * Jason DeTiberus: @jrondeau (Jennifer Rondeau) for the relentless work on improving our docs and helping bring some more structure to the docs process for sig-cluster-lifecycle
+ * @neolit123 (Lubomir Ivanov): @jdetiber (Jason DeTiberus), @liz (Liz Frost), @cha (Chuck Ha), @timothysc (Timothy St. Clair) and @luxas (Lucas Käldström) for the relentless grind through the kubeadm 1.11 backlog, potentially making it the best release thus far.
+ * @austbot (Austin Adams): To @lukaszgryglicki (Lukasz Gryglicki) for DevStats, which is Awesome!!
+ * Stealthybox (Leigh Capili): shoutout to @oikiki (Kirsten) for being very welcoming to new contributors
+ * Nikhita: shoutout to the whole test-infra community for actively using emojis in issues, PRs and slack. It's pretty subtle but it goes a LONG way in making the project and community more friendly and welcoming to new contributors!! cc @fejta (Erick Fejta) @bentheelder (Benjamin Elder) @cblecker (Christoph Blecker) @stevekuznetsov (Steve Kuznetsov)
+ * @misty (Misty Stanely-Jones): @Jesse (Jesse Stuart) for fixing CSS relating to tab sets in docs! :raised_hands:
+ * @fejta (Erick Fejta): @krzyzacy (Sen Lu) and @bentheelder (Benjamin Elder) for being ever diligent about reviewing PRs in a timely manner
+ * JoshBerkus: to @kjackal (Konstantinos) for actually beta-testing 1.11 and spotting a bug before RC1
+ * @oikiki (Kirsten): shoutout to @gsaenger for always generously helping new folks get started contributing to k8s! (and also for completing her first major technical PR!) WOOP WOOP!
+ * @gsaenger (Guinevere Senger) Um... no, really, I couldn't have done it without so much help from @cblecker (Christoph Blecker) and @cjwagner (Cole Wagner) and @fejta (Erick Fejta) and @bentheelder (Benjamin Elder). Everyone was super nice and patient and helped me learn. :heart: So, shoutouts to them. I'm so grateful.
+
+
+## June 14, 2018 - ([recording](https://youtu.be/yAtOHS6C-W0))
+
+
+
+* **Moderators**: Zach Arnold [Ygrene Energy Fund/SIG Docs]
+* **Note Taker**: Jorge Castro [Heptio/SIG Contribex] and Solly Ross [Red Hat/SIG Autoscaling]
+* [ 0:00 ]** Demo **-- Building Images in Kubernetes [Priya Wadhwa, priyawadhwa@google.com] (confirmed)
+ * [https://github.com/GoogleContainerTools/kaniko](https://github.com/GoogleContainerTools/kaniko)
+ * [https://docs.google.com/presentation/d/1ZoiQ3cuQNJJciKq_JvqTty_tcoaRKNyYRzgCBbTumsE/edit?usp=sharing](https://docs.google.com/presentation/d/1ZoiQ3cuQNJJciKq_JvqTty_tcoaRKNyYRzgCBbTumsE/edit?usp=sharing)
+ * Tool for building container images without needing to mount in Docker socket
+ * Extracts base image to file system
+ * Downloads build context tarball from storage (e.g. S3, more on the way)
+ * Executes commands listed in Dockerfile
+ * Snapshots in userspace after each step
+ * Ignores mounted directories during snapshots
+ * Can be run in gVisor as well
+ * Questions:
+ * do you have to use dockerfiles, or can you use other instruction sets
+ * Only dockerfiles right now, but file issues if you want other things
+ * Which dockerfile verbs are supported?
+ * All of them
+ * Can the bucket be S3 or DO Space?
+ * Working on a PR right now to support other solutions
+ * Feature parity with docker build?
+ * Yes
+ * Link to slides.
+* [ 0:00 ]** Release Updates**
+ * 1.11 [Josh Berkus - Release Lead]
+ * **_Next Deadline: RC1 and branch on June 20th_**
+ * Less than a week of code freeze left!
+ * **Docs are due** and overdue; if you have a feature in 1.11,_ you should have already submitted final docs_. Contact the docs team.
+ * CI signal is good, a few tests being flaky, especially alpha-features.
+ * Only 2 issues and 6 PRs open; currently more stable than we've ever been! Thanks so much to everyone for working to get stuff in the release early.
+ * 1.12 [Tim Pepper - 1.12 Release Lead]
+ * Tim Pepper as Lead
+ * Almost finished building 1.12 team, contact @tpepper on Slack to join.
+ * Needed:
+ * PR triage (tentatively adding role separate from issue triage)
+ * Branch manager
+ * Patch Release Updates
+ * 1.10.4?
+* [ 0:00 ] **KEP o' the Week**
+ * SIG Cloud Provider KEP: Reporting Conformance Test Results to Testgrid [Andrew Sy Kim]
+ * Formerly WG-Cloud-Provider
+ * Standards and common requirements for Kubernetes cloud provider integrations
+ * Improving docs around cloud providers (how to work with different integration features)
+ * Improving testing of cloud providers
+ * KEP is basically "Why we want conformance tests reported by the cloud providers"
+ * We didn't have a formal way to do this without KEP
+ * SIG Testing infra wasn't available back then, so now we have testgrid and a way to report tests, etc. Gives providers instructions to follow to contribute results.
+ * SIG Openstack has been pioneering this work
+ * We want all providers to do this eventually, we'll be reaching out to all the cloud providers to give them visibility that this KEP exists.
+ * Still missing some details, will address those as more experience is developed in how to do better test
+ * Q:
+ * Coverage is listed as out of scope, but is a benefit, will coverage improvements be a follow-on KEP?
+ * Eventually, but currently not necessarily an immediate priority
+ * [https://github.com/kubernetes/community/pull/2224](https://github.com/kubernetes/community/pull/2224)
+* [ 0:00 ] **Open KEPs** [Kubernetes Enhancement Proposals]
+ * [Check out the KEP Tracking Board](https://github.com/kubernetes/community/projects/1)
+* [ 0:00 ] **SIG Updates**
+ * SIG Windows [Patrick Lang]
+ * [Trello board](https://trello.com/b/rjTqrwjl/windows-k8s-roadmap) - maps K8s features to Windows release needed
+ * Releasing twice a year in the Windows Server Semi Annual Channel
+ * Like 18.03, 17.09, etc.
+ * We've had to make changes to Windows Server to make Kubernetes work well. For example, symbolic links in Windows v. Unix.
+ * Board is tagged with the right version of Windows to use to get a particular Kubernetes feature working, but in general, use the latest release if possible
+ * Kube 1.11
+ * Lots of features with Windows, e.g. Kubelet stats
+ * For the future
+ * Currently using dockershim, trying to figure out how to support other CRI implementations
+ * Working with other CNI plugins (Flannel, OVN, Calico)
+ * Trying to get support for showing test results via Prow, Kubetest, TestGrid
+ * Want to move to GA eventually, with 2019 Windows release (extended support cycle)
+ * Questions:
+ * Q: Unix-style symlinks in Windows?
+ * Have something similar to unix-style symlinks and hardlinks, needed to make some symlink changes to make sure you can't traverse in an insecure way. Code either in Kubelet or Go winio library
+ * Hardlinks not recommended, stick to the symlinks.
+ * Q: is windows currently dockershim+embedded EE
+ * Currently uses Docker EE Basic for Windows (as published by Docker), used for testing
+ * Potentially switching to crio eventually, or containerd
+ * SIG Apps [Kenneth Owens]
+ * Helm
+ * Helm moved to a separate CNCF project (see last meeting)
+ * Helm 2 stability release
+ * Helm 3 proposal merged, work continuing
+ * Application Resource
+ * Seeks to describe application as running
+ * Controller soon
+ * WIP
+ * AppDef WG
+ * Winding down
+ * Proposal for common labels and annotations will be merged in partial form
+ * Ksonnet
+ * New release
+ * Decentralized charts repo coming
+ * Skaffold: kustomize support
+ * Workloads API
+ * Need to stabilize Job
+ * Want to really make sure cron jobs are stable before moving cronjobs out of beta
+ * Job [first class sidecar container KEP](https://github.com/kubernetes/community/issues/2148) discussion ongoing
+ * Questions:
+ * Why didn't charts go with Helm to a separate CNCF project
+ * Current Status: Charts are listed as a subproject of SIG Apps.
+ * Chart maintainers aren't necessarily Helm maintainers
+ * Trying to figure out the right model for maintainership
+ * Is the charts tooling part of the charts subproject, or Helm?
+ * Unsure, currently part of the charts subproject, but points to the kubernetes/helm repo
+ * SIG Docs [Jennifer Rondeau]
+ * Zach is out sick today, full update August-ish, feel better Zach!
+ * 1 minutes Jennifer update
+ * We're making great progress on fixes for the hugo migration, we've plowed through a bunch, thanks to all the new contributors who have been diving in.
+ * Thanks to all of you who have submitted 1.11 docs
+ * **If you're behind on 1.11 docs, please submit them asap!**
+* [ 0:00 ] **Announcements**
+ * [K8s Office Hours](https://github.com/kubernetes/community/blob/master/events/office-hours.md) Next Week, Wednesday 6/20
+ * Volunteers always sought, ping @jorge or @mrbobbytables on slack
+ * Users who participate will be entered in a raffle to win a k8s shirt!
+ * SIG Leads, if you haven't uploaded your meeting videos to the youtube channel recently, please try to catch up. Ping @jorge if you need help.
+ * SIG Architecture has a new meeting time at 11PST every other Thursday after this meeting. Also, there is a new Zoom link you can get from joining the mailing list. Check out the[ SIG Arch readme for more information](https://github.com/kubernetes/community/tree/master/sig-architecture).
+ * Shoutouts this week (Check in #shoutouts on slack)
+ * (Josh Berkus) @liggitt and @dims for pitching in and doing a ton of work on PRs for 1.11, across all of Kubernetes.
+ * (Jennifer Rondeau) @misty for stepping in to help with ALL things docs no matter how crazy they get or how much else she has on her plate :tada:
+ * (Aish Sundar) @justaugustus for giving us a huge head start and herding all the cats to get a stellar 1.12 release team already in place. Thanks a lot!
+ * (Misty Stanley-Jones + Aish Sundar) @jberkus for herding 1.11 release cats! :cat:
+ * To echo what @misty said, HUGE shoutout to @jberkus for being an awesome patient leader throughout 1.11 cycle. It was such a learning experience seeing him work through issues calmly, all the while encouraging the RT team to lead in our own little way.
+ * Jason DeTiberus
+ * @neolit123 (Lubomir Ivanov) for all of the docs contributions for kubeadm v1.11
+ * @jrondeau (Jennifer Rondeau) for the relentless work on improving our docs and helping bring some more structure to the docs process for sig-cluster-lifecycle
+ *
+
+
+## June 7, 2018 - ([recording](https://youtu.be/fOhby7EUiuo))
+
+
+
+* **Moderators**: Jaice Singer DuMars [SIG Release/Architecture]
+* **Note Taker**: Austin Adams [Ygrene Energy Fund]
+* [ 0:00 ] **Demo** -- YugaByte ~ Karthik Ranganathan [[karthik@yugabyte.com](mailto:karthik@yugabyte.com)] (confirmed)
+ * Karthik Ranganathan
+ * GitHub:[ https://github.com/YugaByte/yugabyte-db](https://github.com/YugaByte/yugabyte-db)
+ * Docs:[ https://docs.yugabyte.com/](https://docs.yugabyte.com/)
+ * Slides: https://www.slideshare.net/YugaByte
+ * Yugabyte is a database focusing on planet scale, transactions, and high availability. It implements many common database APIs, making it a drop-in replacement for those DBs. Can run as a StatefulSet on k8s. Multiple db api paradigms can be used for one database.
+ * No Kubernetes operator yet, but it's in progress.
+ * Answers from Q&A:
+ * @jberkus - For q1 - YB is optimized for small reads and writes, but can also perform batch reads and writes efficiently - mostly oriented towards modern OLTP/user-facing applications. Example is using spark or presto on top for use-cases like iot, fraud detection, alerting, user-personalization, etc.
+ * q2: operator in the works. We are just wrapping up our helm charts[ https://github.com/YugaByte/yugabyte-db/tree/master/cloud/kubernetes/helm](https://github.com/YugaByte/yugabyte-db/tree/master/cloud/kubernetes/helm)
+ * q3: the enterprise edition does have net new DB features like async replication and enforcing geographic affinity for reads/writes, etc. Here is a comparison:[ https://www.yugabyte.com/product/compare/](https://www.yugabyte.com/product/compare/)
+ * q4: You cannot write data using redis and read using another API. Its often tough to model across api's. Aim is to use a single database to build the app, so support common apis
+ * The storage layer is common
+ * So all APIs are modeled on top of the common document storage layer
+ * The API layer (called YQL) is pluggable
+ * Currently we model Redis "objects" and Cassandra "tables" on top of this document core, taking care to optimize the access patterns from the various APIs
+ * We are working on postgres as the next API
+* [ 0:00 ]** Release Updates**
+ * 1.11 [Josh Berkus - Release Lead]
+ * **_Next Deadline: Docs Complete, June 11_**
+ * All listed features have docs in draft -- Thanks!
+ * However: non-listed (minor) changes, please make sure you have docs!
+ * [Currently](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release-1.11.md) in Code Freeze
+ * Only 1.11 patches, must be approved and critical-urgent
+ * Down to 11 PRs
+ * Still using the old Milestone Munger, so expect the same annoying behavior, sorry.
+ * Particularly: can't take back-branch PRs.
+ * <span style="text-decoration:underline;">No New Features/Cleanups Now, please</span>
+ * All new features have draft documentation, however, there are lots of small patches **not big enough** to be a feature but we don't know if we have documentation for those.
+ * **Please make sure your 1.11 small patches have documentation.**
+ * Code freeze ends June 19th.
+ * Docs need to be complete by June 11th
+ * CI Signal looking good
+ * Recent GKE breakage fixed.
+ * Only upgrade/downgrade tests failing, PR in progress.
+ * Thanks everyone for responding to test fails quickly!
+ * Scalability/Performance
+ * Currently passing all performance tests
+ * Thanks to everyone who worked on this early in the cycle!
+ * New performance presubmit test
+ * Kudos to SIG-scalability for getting this done.
+ * 1.12
+ * Currently working on[ forming a 1.12 Release Team](https://github.com/kubernetes/sig-release/issues/167)
+ * Interested? Comment on the PR or speak up in #sig-release
+* **[ 0:00 KEP Highlight ] **- Kustomize [ Jeff Regan ]
+ * overall process: [https://github.com/kubernetes/community/tree/master/keps ](https://github.com/kubernetes/community/tree/master/keps)
+ * Kustomize
+ * Kustomize is a way for us to provide a declarative way to update resources in kubernetes. This allows us to version control changes to k8s configs and resources and so forth.
+ * Sig cli is sponsoring this project.
+ * **[PR](https://github.com/kubernetes/community/pull/2132)** for the KEP - commentary important
+ * **final committed KEP - **[https://github.com/kubernetes/community/blob/master/keps/sig-cli/0008-kustomize.md](https://github.com/kubernetes/community/blob/master/keps/sig-cli/0008-kustomize.md)
+ * **<span style="text-decoration:underline;">actual resulting repo: [github.com/kubernetes-sigs/kustomize](https://github.com/kubernetes-sigs/kustomize)</span>**
+ * KEP Life cycle
+ * We have a GitHub project that helps keep track of Kep project lifecycles. See it here [https://github.com/kubernetes/community/projects](https://github.com/kubernetes/community/projects)
+* [ 0:00 ] **SIG Updates**
+ * **Multicluster **- Quinton Hoole (confirmed)
+ * Sig Intro
+ * Focused on solving challenges with running multiple clusters and applications therein.
+ * Working on Cluster Federation, Cluster Registry(cluster registry for k8s for cluster reuse) and Multi cluster ingress.
+ * FederationStatus
+ * Development has split between federation v1 and v2.
+ * Federation v1 is a POC and no further development planned, users showed they needed something different.
+ * Moving forward Federation v2 will focus on reusable components, federation specific apis and implementations of higher level apis and federation controllers.
+ * v2 Alpha is planned for June.
+ * Behind the effort is RedHat and Huawei.
+ * Cluster Registry Status
+ * Grew out of Federation v1. Allows reusable clusters and discovery. Google Cloud is supported for now, but more coming. Implementation is based on CRDS.
+ * Apis/CRDS in beta.
+ * [Link to slides](https://docs.google.com/presentation/d/1mdIgFkSr7dxsoTcDZCaW0nMVLLHwn5sEOyFzBhzUgD4/edit?usp=sharing)
+ * Network - Tim Hockin - (confirmed) (or dc
+ * Sig Intro
+ * In-progress Network Plumbing CRD Spec doc:
+ * A CRD is being proposed. Reference implementation is in the works. There is a proposal that covers all the relevant information.
+ * [https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ/edit#](https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ/edit#)
+ * Network Service Mesh proposal slides
+ * [https://docs.google.com/presentation/d/1vmN5EevNccel6Wt8KgmkXhAfnjIli4IbjskezQjyfUE/edit#slide=id.p](https://docs.google.com/presentation/d/1vmN5EevNccel6Wt8KgmkXhAfnjIli4IbjskezQjyfUE/edit#slide=id.p)
+ * DevicePlugins (from Resource Management WG) have some intersection with networking, there have been many demos/PoCs but so far no consensus on how DPs should interact with existing CRI networking APIs
+ * CoreDNS is now GA in 1.11
+ * IPVS Proxy mode is now GA in 1.11 (anyone have a link?) but not default
+ * Looking at breaking out ingress into a bunch of individual route resources instead of one monolithic list.
+ * IPv6 discussions around how to support dual-stack are ongoing
+ * We are working on test flakes, we don't have a fix yet but HELP WANTED
+ * **VMware** - Steve Wong (confirmed)
+ * Vmware Cloud Provider
+ * The target is 1.12.
+ * Working through some process level things. This project is retained as a SubProject.
+ * Creating a working group to handle testing
+ * [Link to deck](https://docs.google.com/presentation/d/1GUrqhEpVkMb4ypCcoXs3WZGkRtYylXCNRiLSAmhc-zs/edit?usp=sharing), 4 slides, estimated 5 min:
+* [ 0:00 ] **Announcements**
+ * **Happy birthday, Kubernetes!**
+ * **Shoutouts -** _powered by slack #shoutouts _- if you see someone doing great work give them a shoutout in the slack channel so we mention those here!
+ * "@jrondeau for working on the weekend to get 1.11 doc builds working again!!" -mistyhacks
+ * "@andrewsykim for all the effort in getting SIG Cloud Provider off the ground!" -fabio
+ * "@neolit123 for really stepping up lately to help with user facing issues for the kubeadm 1.11 release. we really appreciate your contributions to the sig" -stealthybox
+ * "@cblecker who is everywhere keeping tabs on things and people on track." -gsaenger
+ * **Help Wanted**
+ * [Stephen Augustus]** **[1.12 release team is forming](https://github.com/kubernetes/sig-release/issues/167), see #sig-release for more info. Roles & Responsibilities info [here](https://github.com/kubernetes/sig-release/#kubernetes-release-team-roles). Volunteers needed!
+ * Help wanted on Sig Network Test Flakes reach out to #sig-network on slack
+ * Anyone interested in learning Prow and helping with the transition from Munger to Prow will be helpful. See @jberkus
+
+
+## May 31, 2018 - ([recording](https://youtu.be/9RSY7czYRCY))
+
+
+
+* **Moderators**: Jorge Castro [SIG Contributor Experience]
+* **Note Taker**: First Last [Company/SIG]
+* [ 0:00 ]** Demo **-- [Aptomi](https://github.com/Aptomi/aptomi/) - application delivery engine for K8S [Roman Alekseenkov]
+ * framework on top of helm charts, for composition into services
+ * showed charts (hdfs, kafka, spark, zookeper), that together show twitter status
+ * Link to slides: [https://docs.google.com/presentation/d/1HQQ_hScOyfIt8SAYPRu6fUuLJCv7b7e6bUyosd38ir8/edit?usp=sharing](https://docs.google.com/presentation/d/1HQQ_hScOyfIt8SAYPRu6fUuLJCv7b7e6bUyosd38ir8/edit?usp=sharing)
+ * Link to repositories: [https://github.com/Aptomi/aptomi/](https://github.com/Aptomi/aptomi/)
+ * <span style="text-decoration:underline;">Full Demo (40 min): [https://www.youtube.com/watch?v=GVB3kKocKi4](https://www.youtube.com/watch?v=GVB3kKocKi4)</span>
+ * [Description & Blog Post](https://superuser.openstack.org/articles/aptomi-application-delivery-engine-k8s/)
+* [ 0:13 ] **Release Updates**
+ * 1.11 [Josh Berkus - Release Lead]
+ * **_Next Deadline: Draft doc PRs due June 4th._**
+ * Currently in Code Slush. Requiring milestones, sorry for lack of warning on that.
+ * Were not able to move to Prow milestone maintainer or Tide for this release.
+ * Code Freeze Starts Tuesday, June 5th
+ * If your feature won't be ready, now is the time to update your issue in the Features repo.
+ * [Feature tracking spreadsheet](https://docs.google.com/spreadsheets/d/16N9KSlxWwxUA2gV6jvuW9N8tPRHzNhu1-RYY4Y0RZLs/edit#gid=2053885135) has been reformatted with lots of new information.
+ * CI Signal -
+ * Almost green, last few fixes merged.
+ * 1 open tracking issue - [Scale Density test for 30 pods](https://github.com/kubernetes/kubernetes/issues/63030)
+ * Conformance tests results (GCE and OpenStack) now in Release blocking dashboard
+ * @misty on slack for release docs issues
+ * Patch Release Updates
+ * x.x
+ * y.x
+* [ 0:00 ] **Introduction to KEPs** [Kubernetes Enhancement Proposals] [Caleb Miles]
+ * We'll be highlighting KEPs in community meetings
+ * tracking how decisions are made: identify the problem + find a sig for motivation agreement + documenting it for everyone
+ * [Slides](https://docs.google.com/a/google.com/presentation/d/e/2PACX-1vQ0KX1TuXC9VeXPRZhxZxNILoFzL7oEpLO1szMGCYCThxTstpK7VH7s_EJ4axseJVkJ6kkYDvhFJmsC/pub?start=false&loop=false&delayms=3000)
+* [ 0:00 ] **SIG Updates**
+ * SIG OpenStack [David Lyle and Chris Hoge]
+ * [https://docs.google.com/presentation/d/1BGdbMQnSzrYOTLxW8VZswSwMwQ2HPlegVJszITYFv6c/edit?usp=sharing](https://docs.google.com/presentation/d/1BGdbMQnSzrYOTLxW8VZswSwMwQ2HPlegVJszITYFv6c/edit?usp=sharing)
+ * expanded testing of provided code
+ * driver testing added
+ * whitepaper
+ * SIG Node [Dawn Chen]
+ * Made steady progress on all 5 areas in Q2: 1) node management including Windows, 2) application / workload management, 3) security, 4) resource management and 5) monitoring, logging and debuggability.
+ * On node management
+ * Promoted dynamic kubelet config to beta
+ * Refactor the system to use node-level checkpointing manager
+ * Proposed a probe-based mechanism for kubelet plugins: device, csi, etc.
+ * Proposed a design to address the scalability issue caused by large node object and approved by the community. Had a short-term workaround in v.11, and plan to work on the long term solution in v1.12.
+ * Together with sig-windows, we made much progress on Windows support, including stats and node e2e for the Windows Container Image. More work on SecurityContext, storage and network in the next release.
+ * Both CRI-O and containerd are GA in this release.
+ * More enhancements on CRI for container logs
+ * Many enhancements to crictl, the tool for all CRI-compliant runtimes. Expecting to be GA in v1.12
+ * Announced CRI testing policy to the community, and introduced node exclusive tags to e2e.
+ * On security
+ * For 1.11, making all addons use default seccomp profile. Expecting to promote it to beta and enable it by default.
+ * Proposed a design and alpha-level Kubernetes API for sandbox. Working closely with Kata community and gVisor community on integration of CRI-compliant runtime.
+ * WIP for user namespace support
+ * Made progress on node TLS bootstrap via TPM.
+ * On resource management side, we made the progress on promoting sysctl to beta and proposed ResourceClass to make resource support extensible.
+ * Made steady progress on debug pod, but unfortunately due to back-and-forth reviews from different reviewers on API changes, we couldn't have alpha support in v1.11. Escalated it to sig-architecture.
+ * On the logistics side
+ * Sig-node holds weekly meeting on Tuesday, 10am (Pacific Time)
+ * Please join kubernetes-sig-node googlegroup to have access to all design docs, roadmap and emails.
+ * Derek and I are working on sig-node charter, which is still under review and discussion.
+* [ 0:00 ] **Announcements**
+ * [Deprecation Policy Update](https://groups.google.com/forum/#!topic/kubernetes-dev/pNcskHXAD-k) (Important!)
+ * SIG Leads - check the top of this document for a link to the SIG Update schedule.
+ * Shoutouts - Someone going above and beyond? Mention them in #shoutouts on slack to thank them.
+ * Aish Sundar - Shoutout to @dims and OpenStack team for quickly getting their 1.11 Conformance results piped to CI runs and contributing results to Conformance dashboard!
+ * Aish Sundar - Shoutout to Benjamin Elder for adding Conformance test results to all Sig-release dashboards - master-blocking and all release branches.
+ * Josh Berkus and Stephen Augustus - To Misty Stanley-Jones for aggressively and doggedly pursuing 1.11 documentation deadlines, which both gives folks earlier warning about docs needs and lets us bounce incomplete features earlier
+ * Help Wanted
+ * Looking for Mandarin-speakers to help with new contributor workshop and other events at KubeCon Shanghai. If you can help, please contact @jberkus / [jberkus@redhat.com](mailto:jberkus@redhat.com)
+ * [KEP-005](https://github.com/kubernetes/community/blob/master/keps/sig-contributor-experience/0005-contributor-site.md) - Contributor Site - ping [jorge@heptio.com](mailto:jorge@heptio.com) if you can help!
+ * Meet Our Contributors (mentors on demand)
+ * June 6th at 230p and 8pm **UTC** [https://git.k8s.io/community/mentoring/meet-our-contributors.md](https://git.k8s.io/community/mentoring/meet-our-contributors.md)
+ * Want to know the paths of some of our approvers? Confused about what a SIG is? Anything that you'd ask a mentor - ask in #meet-our-contributors on slack or DM @paris with an anonymous question
+ * [Stackoverflow Top Users](https://stackoverflow.com/tags/kubernetes/topusers)
+ * [Const](https://stackoverflow.com/users/9663586/const)
+ * [VAS](https://stackoverflow.com/users/9521610/vas)
+ * [Alexandr Lurye](https://stackoverflow.com/users/9611623/alexandr-lurye)
+ * [James Strachan](https://stackoverflow.com/users/2068211/james-strachan)
+ * [Jordan Liggitt](https://stackoverflow.com/users/54696/jordan-liggitt)
+ * Thread o' the week: [How has Kubernetes failed for you?](https://discuss.kubernetes.io/t/how-has-kubernetes-failed-for-you/481)
+
+
+## May 24, 2018 - ([recording](https://youtu.be/zKtxTbq0s4o))
+
+
+
+* **Moderators**: Josh Berkus [SIG-Release]
+* **Note Taker**: Tim Pepper [VMware/SIGs Release & ContribX]
+* [ 0:00 ] **Demo** -- Workflows as CRD [ Jesse Suen (Jesse_Suen@intuit.com)]
+ * Link to slides: [https://drive.google.com/file/d/1Z5TMIr6r4hC7N5KeVqajC3c3NcYqK4_z/view?usp=sharing](https://drive.google.com/file/d/1Z5TMIr6r4hC7N5KeVqajC3c3NcYqK4_z/view?usp=sharing)
+ * Link to repositories: [https://github.com/argoproj/argo](https://github.com/argoproj/argo)
+ * Argo: a fancy job controller for workflows, DAGs implemented as CRD. Originally intended for CI/CD pipelines, but is seeing usage for other workflows like machine learning.
+ * Used with kubeflow
+ * Component architecture interfacing with k8s api server and leveraging sidecars in pods for workload artifact management
+ * Argo command line gives validation of commands, but is effectively a kubectl wrapper
+ * Workflows can be defined as a top down iterative list of steps, or as a DAG of dependencies
+* [ 0:16 ] **Release Updates**
+ * 1.11 Update [Josh Berkus, Release Lead]
+ * **_Next Deadline: Docs Placeholder PRs Due Tomorrow for [feature list](https://docs.google.com/spreadsheets/d/16N9KSlxWwxUA2gV6jvuW9N8tPRHzNhu1-RYY4Y0RZLs/edit#gid=0)_**!!!
+ * **_Code Slush on Tuesday_**
+ * Current CI status and schedule
+ * CI Signal : [Tracking 3 open issues](https://docs.google.com/spreadsheets/d/1j2K8cxraSp8jZR2S-kJUT6GNjtXYU9hocNRiVUGZWvc/edit#gid=127492362), all are test issue being actively worked on.
+ * Code freeze coming June 5, make sure your issues/PRs are up to date with labels and priorities and status
+ * [Changing Burndown Meeting Schedule](https://github.com/kubernetes/sig-release/issues/148), please comment... looking for less conflicted times and ones friendlier for more timezones
+ * Patch Release Updates?
+ * 1.10.3 released monday
+* [ 0:21 ] **SIG Updates**
+ * SIG Service Catalog [Doug Davis] (confirmed)
+ * Beta as of Oct 2017
+ * Key development activities
+ * New svcat cmd line tool (similar to CloudFoundry's way of things)
+ * NS-scoped brokers - still under dev
+ * Considering moving to CRDs instead of dedicated apiserver
+ * Finalizing our v1.0 wish-list
+ * NS-scoped brokers
+ * Async-bindings
+ * Resolve CRD decision
+ * Generic Broker & Instance Actions
+ * GUIDs as Kube "name" is problematic
+ * SIG has recently been actively mentoring and onboarding newcomers
+ * SIG Auth [Tim Allclair](confirmed)
+ * Pod TokenRequest API and ServiceAccountTokenProjection improving for 1.11
+ * Client-go gaining support for x509 credentials and externalizing currently in-tree credential providers
+ * Scheduling policy design thinking happening ahead of 1.12
+ * Audit Logging: improved annotation metadata coming around auth and admission for logs
+ * Node Isolation: nodes no longer able to update their own taints (eg: exploit to attract sensitive pod/data to a compromised node)
+ * Conformance: [KEP PR open on security related conformance](https://github.com/kubernetes/community/pull/2081) testing to give better assurance that best practices are in use or validate a hardened profile is active. Likely not 1.11 rather 1.12.
+ * Bug bounty is WIP
+ * SIG Storage [Brad Childs](confirmed) [Slides](https://docs.google.com/presentation/d/1HkCHC5xkxt2TXOLS1riXUeIRRejtX-82tOf5x42F4w8/edit?usp=sharing)
+ * Had SIG face-to-face meeting last week. ~40 people and 19 companies present
+ * Storage functionality is moving out of tree by way of the CSI interface
+ * CSI spec moving from 0.2 to 0.3 soon
+ * Lots of CSI related features coming in k8s 1.11
+ * Aiming for out-of-tree feature parity relative to existing in-tree
+ * Feature areas: Snapshots, topology aware (scheduling relative to location of PV) and local PV, local disk pools/health/capacity, volume expansion and online resize
+ * Testing: multi-phased plan to inventory and improve test coverage and CI/CD, including test coverage on other cloud providers beyond GCE. VMware committed resources, looking for commit from others.
+ * Operators: external provisioners, snapshot and other operator frameworks underway. Currently not looking to do a shared operator library to span SIG-Storage repos.
+ * Metrics: there are a lot. Some are cloud provider specific. Goal is to assist SRE's in problem determination and corrective action.
+ * API Throttling: api quota exhaustion at cloud provider and api server are frequently causing storage issues. Looking at ways to streamline.
+ * External projects: SIG has something like 20 projects and is breaking them apart, looking for owners and out of tree locations for them to better live. Projects should move to CSI, a kubernetes-sigs/* repo, a utility library, or EOL
+* [ 0:00 ] **Announcements**
+ * <span style="text-decoration:underline;">Shoutouts this week</span> (Check in #shoutouts on slack)
+ * Big shoutout to @carolynvs for being welcoming and encouraging to newcomers, to @paris for all the community energy and dedication, and to all the panelists from the recent Kubecon diversity lunch for sharing their experiences.
+ * Big shoutout to @mike.splain for running the Boston Kubernetes meetup (9 so far!)
+ * everyone at svcat is awesome and patient especially @carolynvs, @Jeremy Rickard & @jpeeler who all took time to help me when I hit some bumps on my first PR.
+ * <span style="text-decoration:underline;">Help Wanted</span>
+ * SIG UI is looking for new contributors. Check out their issue log to jump in; also listen to their SIG UI call today where they explained more and answered questions. #sig-ui in slack for on-ramp help. [Notes from the call ](https://docs.google.com/document/d/1PwHFvqiShLIq8ZpoXvE3dSUnOv1ts5BTtZ7aATuKd-E/edit?usp=sharing)
+ * Looking for more mentors as we kick off our contributor mentoring programs. [Fill out this form ](https://goo.gl/forms/uKbzNsv51JUVkC0g1)(works for looking for mentorship, too). Pardon the dust as we do a mentor recruiting drive.
+
+
+## May 17, 2018 - ([recording](https://youtu.be/DpFTcTnBxbM))
+
+
+
+* **Moderators**: Paris Pittman [SIG Contributor Experience]
+* **Note Taker**: Solly Ross
+* **Demo:** [Gardener Demo](https://gardener.cloud) ([vasu.chandrasekhara@sap.com](mailto:vasu.chandrasekhara@sap.com) and [rafael.franzke@sap.com](mailto:rafael.franzke@sap.com))
+ * [_occurred towards end of video instead_]
+ * Open Source: [https://gardener.cloud/](https://gardener.cloud/)
+ * Mission
+ * Manage, maintain, operate multiple k8s clusters
+ * Work across public and private clouds
+ * Architecture
+ * Self-hosted
+ * Kube-centric
+ * Steps
+ * Boot initial "garden" cluster using kubify ([https://github.com/gardener/kubify](https://github.com/gardener/kubify), open source)
+ * Deploy Gardener to "garden" cluster + dashboard (Gardener is extension API server)
+ * Run/use "seed" cluster to run control plane components, terraformer for each cluster "shoot" cluster (1 seed per hosting platform, region, etc)
+ * Each set of control plane components corresponds to a "shoot" cluster with actual nodes (machine controller + machine API objects control this)
+ * VPN between "seed" cluster and "shoot" clusters so that API server, monitoring can talk to node
+ * Secrets are created for each shoot to easily download kubeconfigs, etc
+ * Declarative config for each cluster ("shoot") with status info as well
+ * Uses cluster API machine resources, working with Cluster API WG
+ * Q: is it stable, or in development?
+ * A: used internally, but still in development
+ * Q: baremetal support?
+ * If there's an infra API that can be used to control baremetal, then that can be used
+ * Detailed Blog describing Gardener's architecture: [https://kubernetes.io/blog/2018/05/17/gardener/](https://kubernetes.io/blog/2018/05/17/gardener/)
+* **Release Updates:**
+ * **1.11** [Josh Berkus, RT Lead / Aish Sundar CI Signal Lead] (Week 7)
+ * **_Next Deadline: Docs, Open Placeholder PRs Required, May 25th_**
+ * 1.11.0 Beta0 released yesterday.
+ * We are delaying/shortening Code Freeze as discussed. See[ new calendar](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release-1.11.md) for current deadlines.
+ * Stable passing tests, low bug count → small code freeze periods → more development time
+ * Code slush: **May 29th**
+ * Code freeze: **June 5th**
+ * Many thanks to dims, liggit, timothysc, krousey, kow3ns, yliaog, k82cn, mrhohn, msau42, shyamvs, directxman12 for debugging fails and closing issues, and AishSundar, Cole Mickens, Mohammed Ahmed, and Zach Arnold for working with the SIGs to get attention on issues and test failures.
+ * Help wanted on scalability and performance
+ * **1.10** [Maciek Pytel, PRM]
+* **SIG Updates:**
+ * [Scheduling](https://github.com/kubernetes/community/blob/master/sig-scheduling/README.md) [Bobby Salamat]
+ * Priority and Preemption
+ * Have gotten good feedback over the past quarter
+ * Moving to beta/enabled by default in 1.11
+ * Equivalence Cache Scheduling
+ * Caching predicate results for given inputs as long as conditions don't change in cluster
+ * Gang Scheduling
+ * Schedule a bunch of pods together, don't schedule only a subset
+ * Kube-arbitrator has a prototype that seems to work well
+ * Need to collect more requirements
+ * Q: Can we use batch scheduling to improve throughput?
+ * A: Maybe use a Firmament-like approach?
+ * Q: is this a step along the way for perf optimization on the current schedule?
+ * A: Engineers from Huawei are working on this, but ran into issues with things like pod-anti-affinity, actually binding the pods
+ * Taint based eviction to beta
+ * Scheduling framework
+ * Still in design framework
+ * Pod scheduling policy
+ * Lots of opinions, progress has been slow
+ * Existing design proposal with lots of opinions
+ * [Scalability](https://github.com/kubernetes/community/blob/master/sig-scalability/README.md) [Bob Wise]
+ * [Slides](https://docs.google.com/presentation/d/1vP3kRPiei5yNrNmsrndWQgZo2idcc8uN0LgjK4xt2wU/edit)
+ * Schedule for large runs of perf is even-odd day
+ * Different Per Axes (there's not just one axis, e.g. "number of nodes")
+ * Nodes, Pod Churn, Pod density, Networking, Secrets, Active Namespaces
+ * Pro Tips
+ * Lock your etcd version
+ * Test your cluster with Kubemark
+ * Recommended reading in slides
+ * Perf regression study
+ * Scalability good practices
+ * WIP Items
+ * Better testing of real workloads (cluster-loader)
+ * More scalability testing in presubmit tests
+ * Concerns around run time issues
+ * Sonobuoy perf testing
+ * Q: Limitations on scalability come down to etcd perf, do we work with etcd engineers?
+ * A: Perf is generally not an etcd issue wrt bottlenecks
+ * A: Etcd tends to be regressions across etcd versions, not etcd as bottleneck
+ * A: Range locking issues being improved in 3.3,3.4
+ * A: talk to Shyam about this for more info
+ * [API Machinery](https://github.com/kubernetes/community/blob/master/sig-api-machinery/README.md) [Daniel Smith, confirmed]
+ * New Dynamic client with better interface!
+ * Old is under "deprecated" directory
+ * Clientside QPS rate limit behavior changed
+ * CRD Versioning
+ * Design issue with versioning priorities found, but no-op conversion will still land in 1.11
+ * Apply WG
+ * Feature branch for apply, trying to put things in master when possible
+ * Won't reintegrate before 1.11 (feature branch work will continue through code freeze)
+* **Announcements:**
+ * **Shoutouts!**
+ * Warm welcome to @liz and @cha for their journeys in joining the k8s org! Both of you have been having a big impact in #sig-cluster-lifecycle - stealthybox
+ * @chancez and @danderson for a great conversation on bare metal options and concerns! - mauilion
+ * shoutout to @liggitt, master wrangler of e2e test bugs. Jordan has fixed many ["fun" bugs](https://github.com/kubernetes/kubernetes/issues/63731#issuecomment-388529120). Thanks for helping keep things green! :smile: - bentheelder
+ * As a new contributor, I can 100% endorse @carolynvs for being REALLY GOOD at bringing in new contributors, and dedicating a lot of time and effort to make sure they are successful. -teague_cole
+ * **Help Wanted!**
+ * SIG UI looking for new contributors to go up the ladder to maintainers. Start with an open issue and reach out to the mailing list and slack channel.
+ * SIG Scalability is looking for contributors!
+ * We need more contributor mentors! [Fill this out.](https://goo.gl/forms/17Fzwdm5V2TVWiwy2)
+ * The next Meet Our Contributors (mentors on demand!) will be on June 6th. Check out kubernetes.io/community for time slots and to copy to your calendar.
+ * **Kubecon Follow Ups**
+ * Videos and slides: [https://github.com/cloudyuga/kubecon18-eu](https://github.com/cloudyuga/kubecon18-eu) Thanks CloudYuga for this!
+ * **Other**
+ * Don't forget to check out [discuss.kubernetes.io](https://discuss.kubernetes.io/)!
+ * DockerCon Kubernetes Contributor AMA during Community Day - June 13th. 3 hour window; specific time TBA
+
+
+## May 10, 2018 - ([recording](https://youtu.be/ygW6jTBp7Fs))
+
+
+
+* **Moderators**: Tim Pepper [SIG Contributor Experience, SIG Release]
+* **Note Taker**: Jorge Castro / Christian Roy
+* **Demo:** Ambassador API Gateway built on Envoy/K8S ([https://www.getambassador.io](https://www.getambassador.io)) ([richard@datawire.io](mailto:richard@datawire.io))
+ * [https://github.com/datawire/ambassador](https://github.com/datawire/ambassador)
+ * Link to [slides](https://www.slideshare.net/datawire/ambassador-kubernetesnative-api-gateway)
+ * Kubernetes only, simple architecture
+ * Apache licensed
+ * Declarative configuration via kubernetes annotations
+ * Built on Envoy - designed for machine configuration
+ * Operates as a sidecar to envoy, async notified of config changes and configures envoy accordingly
+ * Concept of shadowing traffic - takes all the incoming requests and sends it to another service but filters the responses, good for debugging in production.
+* **Release Updates:**
+ * **1.11 **[Josh Berkus, RT Lead / Aish Sundar CI Signal Lead] (Week 6)
+ * _Next Deadline: Beta0, May 15th. Tests must be passing by then!_
+ * _Week started well but gke tests started failing_
+ * _Some top level failing tests generate failures across other groups_
+ * _SIGs are responding responsibly on the failures_
+ * [43 Tracking Features](http://bit.ly/k8s111-features)
+ * CI Signal Test report - [https://docs.google.com/spreadsheets/d/1j2K8cxraSp8jZR2S-kJUT6GNjtXYU9hocNRiVUGZWvc/edit#gid=127492362](https://docs.google.com/spreadsheets/d/1j2K8cxraSp8jZR2S-kJUT6GNjtXYU9hocNRiVUGZWvc/edit#gid=127492362)
+ * **1.10** [Maciek Pytel, PRM]
+ * 1.10.3 release planned Monday May 21st
+* **SIG Updates:**
+ * **Architecture [Brian Grant]**
+ * Working on our [charter](https://github.com/kubernetes/community/pull/2074)
+ * Improving conformance tests
+ * Provide technical expertise/advice/overview across SIGs
+ * Formalizing proposal processes into KEPs, more structure, make it more obvious
+ * API review process. Used to be informal, we want to formalize that.
+ * Weekly meeting with alternating full meeting (decisions) and office hours (discussions)
+ * Office hours are available for people who want to ask questions on how to best implement incoming ideas (API review, etc.)
+ * [Meeting and note information](https://github.com/kubernetes/community/blob/master/sig-architecture/README.md)
+ * **Contributor Experience [Paris Pittman]**
+ * [Contributor site](https://github.com/kubernetes/community/blob/master/keps/sig-contributor-experience/0005-contributor-site.md) KEP underway
+ * Discourse is up and ready to test!
+ * [Discuss.kubernetes.io](http://discuss.kubernetes.io/) - please post content, announcements, meetup reminders, or just [introduce yourself](https://discuss.kubernetes.io/t/introduce-yourself-here/56)!
+ * [Looking for mentors!](https://goo.gl/forms/3ISrNbTkYqExWzKw1)
+ * Register for [Meet Our Contributors monthly YT series](https://youtu.be/EVsXi3Zhlo0) (first Weds of the month; link on kubernetes.io/community) with this form and all other mentoring activities
+ * Contributor Experience survey to go out in June
+ * Communication platform
+ * Flow in github
+ * [Developers Guide underway](https://github.com/kubernetes/community/issues/1919) under Contributor Docs subproject
+ * Contributor Experience Update [slide deck](https://docs.google.com/presentation/d/1KUbnP_Bl7ulLJ1evo-X_TdXhlvQWUyru4GuZm51YfjY/edit?usp=sharing) from KubeConEU (if you are in the k-dev mailing list, you'll have access)
+* **Announcements:**
+ * **Shoutouts!**
+ * See someone doing something great in the community? Mention them in #shoutouts on slack and we'll mention them during the community meeting:
+ * Ihor Dvoretskyi thanks @justaugustus, who made a GREAT job as a Kubernetes 1.11 release features shadow
+ * Josh Berkus to Aish Sundar for doing a truly phenomenal job as CI signal lead on the 1.11 release team
+ * Tim Pepper to Aaron Crickenberger for being such a great leader on the project during recent months
+ * Chuck Ha shouts out to the doc team - "Working on the website is such a good experience now that it's on hugo. Page rebuild time went from ~20 seconds to 60ms" :heart emoji:
+ * Jason de Tiber would like to thank Leigh Capili (@stealthybox) for the hard work and long hours helping to fix kubeadm upgrade issues. (2nd shoutout in a row for Leigh! -ed)
+ * Jorge Castro and Paris Pittman would like to thank Vanessa Heric and the rest of the CNCF/Linux Foundation personnel that helped us pull off another great Contributor Summit and Kubecon
+ * [Top Stackoverflow Users](https://stackoverflow.com/tags/kubernetes/topusers) in the Kubernetes Tag for the month
+ * Anton Kostenko, Nicola Ben, Maruf Tuhin, Jonah Benton, Const
+ * Message from the docs team re: hugo transition:
+ * We've successfully migrated kubernetes.io from a Jekyll site framework to Hugo. Any open pull requests for k/website need to be revised to incorporate the repo's new content structure. (Changes in `docs/` must now change `content/en/docs/`.)
+ * More about the framework change: [https://kubernetes.io/blog/2018/05/05/hugo-migration/](https://kubernetes.io/blog/2018/05/05/hugo-migration/)
+ * KEP Section for the Community Meeting? [Jorge Castro]
+ * Lots of KEPs coming in via PR, should we have current KEPs in flight as a standing agenda item in the community meeting?
+ * When starting a KEP, send an email FYI to the appropriate SIGs and Arch as github notifications are noisy and missed.
+ * Would be good to help us bootstrap the KEP processes for people if we got some visibility on them, but still need a site of KEPs
+ * Kubernetes Application Survey results [Matt Farina] WG
+ * [Raw results](https://docs.google.com/spreadsheets/d/12ilRCly2eHKPuicv1P_BD6z__PXAqpiaR-tDYe2eudE/edit)
+ * [Deck on results](https://docs.google.com/presentation/d/1utT0K-u1nl2apXRo29GaBvRV1x7mFLeQSgpw8mI_nGM/edit?usp=sharing) (Slides)
+ * [Blog post about it](https://kubernetes.io/blog/2018/04/24/kubernetes-application-survey-results-2018/)
+ * Developers, check it out, people took a lot of time to give us lots of good information, take the time to get information from it.
+
+ **Help Wanted?**
+
+ * [SIG UI](https://github.com/kubernetes/community/blob/master/sig-ui/README.md) is looking for additional contributors (with javascript and/or go knowledge) and maintainers
+ * [Piotr](https://github.com/bryk) and [Konrad](https://github.com/konryd) from google have offered to bring folks up to speed.
+ * Take a look at open issues to get started or reach out to their slack channel, mailing list, or next meeting.
+ * SIG UI mailing list: [https://groups.google.com/forum/#!forum/kubernetes-sig-ui](https://groups.google.com/forum/#!forum/kubernetes-sig-ui)
+
+
+## April 26, 2018 - (recording)
+
+
+
+* **Moderators**: Jorge Castro [SIG Contributor Experience]
+* **Note Taker**: Christian Roy
+* **Demo:** [Gitkube](https://github.com/hasura/gitkube): Build and deploy docker images to Kubernetes using git push (<span style="text-decoration:underline;">shahidh@hasura.io</span>, [tiru@hasura.io](mailto:tiru@hasura.io))
+ * [https://github.com/hasura/gitkube](https://github.com/hasura/gitkube)
+ * Git push to a url in your k8s cluster
+ * Builds and deploys the image in a deployment
+* **Release Updates:**
+ * **1.11 **[Josh Berkus, RT Lead]
+ * Features are now frozen!
+ * [44 filed features](https://github.com/kubernetes/features/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+milestone%3Av1.11)
+ * [Test Status](http://bit.ly/k8s111-cisignal) is **Red**
+ * Master-upgrade: 11 out of 12 jobs failing
+ * Master-blocking: 5 out of 26 jobs failing
+ * [Issues filed](https://docs.google.com/spreadsheets/d/1j2K8cxraSp8jZR2S-kJUT6GNjtXYU9hocNRiVUGZWvc/edit#gid=2128913655)
+ * Some SIGs not responding to issues
+ * SIG responsible to debug why test failing, please look at your issues and start to prioritize them!
+ * Next Deadline: [Beta Release May 15th](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release-1.11.md)
+ * Will currently be blocked by failing tests.
+ * 1.10 [Maciek Pytel, PRM]
+ * Branch frozen for 1.10.2, release planned for April 27th or Monday April 30th
+* **Graph of the Week:** Slack Stats [Paris Pittman]
+ * [Slack Stats](https://docs.google.com/document/d/1ZR8ZqYcql_C8nGFrKqcZw_ZANkEb4P-3CNFyT6OkfDc/edit)
+ * 35k users with 5k weekly active users
+ * Produced Quarterly
+* **SIG Updates:**
+ * **Thanks to test infra folks for labels**
+ * **Cluster Lifecycle [Tim St. Clair]**
+ * Kubeadm
+ * Steadily burning down against 1.11
+ * Found+Fixed some thorny upgrade issues in 1.10
+ * Tests are still broken
+ * Working on proposal
+ * HA
+ * Master join
+ * UX
+ * Phases rework
+ * Upgrade
+ * Config changes
+ * Self hosting
+ * ClusterAPI [kris nova]
+ * There is a new repo: https://github.com/kubernetes-sigs/cluster-api
+ * Aiming to keep cloud provider logic OUT of the repo (common logic only)
+ * Aiming for a (api only) alpha release 1.11
+ * Configurable machine setup proposal in progress
+ * [https://docs.google.com/document/d/1OfykBDOXP_t6QEtiYBA-Ax7nSpqohFofyX-wOxrQrnw/edit?ts=5ae0b27a#heading=h.xgjl2srtytjt](https://docs.google.com/document/d/1OfykBDOXP_t6QEtiYBA-Ax7nSpqohFofyX-wOxrQrnw/edit?ts=5ae0b27a#heading=h.xgjl2srtytjt)
+ * **Autoscaling [Solly Ross]**
+ * HPA v2 improvements ([https://github.com/kubernetes/community/pull/2055](https://github.com/kubernetes/community/pull/2055))
+ * Label selectors for metrics
+ * Support for averages on object metrics
+ * Slight changes to structure of object (Unify metrics sources)
+ * Better e2e tests on all HPA functionality
+ * Movement along the path to blocking HPA custom metrics e2e tests
+ * VPA work coming along, alpha soon (demo at KubeCon)
+ * Come say hi at KubeCon (Intro and Deep Dive, talks on HPA)
+ * **PM [Jaice Singer DuMars]**
+ * Working on mechanisms to get feedback from the user community (playing with something like [http://kubernetes.report](http://kubernetes.report) -- in development, not ready for distro yet)
+ * Presenting at KubeCon 16:35 on Thursday ~ Ihor and Aparna
+ * Working on a charter draft
+ * We actually represent three 'P' areas: product, project, and program
+ * Help SIG focus on implementations
+ * We're trying to look a
+* **Announcements:**
+ * **Kubecon next week, no community meeting! **\o/
+ * **Last Chance to Register for the Contributor Summit - **
+ * Registration ends Fri, Apr 7th @ 7pm UTC
+ * Tuesday, May 1, day before Kubecon
+ * You must [register here](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit) even if you've registered for Kubecon
+ * SIGs, remember to [put yourself down on the SIG Update sheet](https://docs.google.com/spreadsheets/d/1adztrJ05mQ_cjatYSnvyiy85KjuI6-GuXsRsP-T2R3k/edit#gid=1543199895) to give your 5 minute update that afternoon.
+ * **Shoutouts!**
+ * See someone doing something great in the community? Mention them in #shoutouts on slack and we'll mention them during the community meeting:
+ * Timothy St. Clair would like to thank Peter Zhao (@xiangpengzhao) for "steadfast PR-review and contributions to SIG Cluster Lifecycle"
+ * Chuck Ha would like to thank Leigh Capilo (@stealthybox) for "being welcoming to new folks in SIG Cluster Lifecycle. You are welcoming and helpful and it keeps our community healthy. Thank You!"
+ * [Normalization of Kind Labels](https://github.com/kubernetes/community/issues/2032) [Josh Berkus]
+ * Updating list of kind labels, how they are used
+ * Cloud Foundry wants to welcome folks to their Summit [on May 1st](http://sched.co/Dun0) (day before KubeCon/CloudNativeCon)
+ * You can now use Kubernetes to [water your lawn](https://www.youtube.com/watch?v=Y5WDO-OTf-4).
+ * No Meeting next week!
+ * **Help Wanted?**
+ * **Add to this section when you have something you need help with! Issue, contributors, etc.**
+
+
+## April 19, 2018 - ([recording](https://youtu.be/fEYVDMB3Xzo))
+
+
+
+* **Moderators**: Paris Pittman [SIG ContribEx]
+* **Note Taker**: Jaice Singer DuMars (Google)
+* [0:01] **Demo** - [Skaffold](https://github.com/GoogleCloudPlatform/skaffold) Matt Rickard - Google ([mrick@google.com](mailto:mrick@google.com))
+ * [https://github.com/GoogleContainerTools/skaffold](https://github.com/GoogleContainerTools/skaffold)
+ * Tool for developing applications on Kubernetes
+ * Allows you to step into CI/CD
+ * skaffold-dev / skaffold-run are the two primary components
+ * [ 0:03 ] - Demo
+ * Q: What is the plan around integration for new Kubernetes releases?
+ * Pinned to 1.10, have integration testing but not version skew
+ * Want to follow the Kubernetes support process of ~2 releases
+ * Q: Why would this not be in CNCF/part of k8s?
+ * Trying to keep it unopinionated
+ * If a community project makes sense, we will examine that
+ * MFarina: Ecosystem projects are the preference to avoid contention
+ * Q: So what are the non docker image formats this tool supports?
+ * Only supports bazel
+ * Working on java support
+ * This is the other build tool we're working on integrating next for skaffold. [https://github.com/google/jib](https://github.com/google/jib)
+ * Minimal arbitrary support, but requires a file to query and parse to determine SC dependencies, currently in-tree but might move to a plugin model
+* [0:14]** Release Updates**
+ * 1.11 [Josh Berkus ~ Release Lead] (confirmed)
+ * **_Feature Freeze is Tuesday, April 24!_**
+ * File your features: [https://github.com/kubernetes/features/issues](https://github.com/kubernetes/features/issues)
+ * Tests are currently **not passing**
+ * [CI Signal Report](https://docs.google.com/document/d/1y044OcaKGEUgj094JH1ZxnnLRHnqi0Kq0f4ov56kvxE/edit?ts=5ad596c2)
+ * [Issues](https://github.com/kubernetes/kubernetes/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+milestone%3Av1.11+test)
+            * Releasing Alpha2 today (thanks Caleb!)
+ * Release notes doc will be up later today, watch kubernetes-dev
+ * Patch Release Updates
+* [0:00]** Graph o' the Week**
+ * YouTube Channel Stats!
+ * ~7000 subscribers and growing
+ * Old videos and high engagement videos get the most attention
+ * SIG recordings are typically used as a sleep aid
+ * 4:45 average view time on videos
+ * If you are posting videos, use descriptive titles, tags
+ * Desktops primary viewing device, also TVs
+ * You can turn the video speed up to 1.5 if you want to get through the material faster
+* [0:23] **SIG Updates**
+ * **CLI **(Maciej Szulik - confirmed)
+ * Printing of objects being moved to server - currently in beta, in 1.10 users were able to opt in to it
+ * You can opt out via flag, but it is on by default in 1.11
+ * No user-facing impact, but if there are, contact sig-cli
+ * Different patterns across the repo, and trying to unify by providing identical flags and output
+ * unified flag handling will unify the code base, ux, and simplify the code base
+ * **AWS **(Justin SB - Confirmed)
+ * Our first [sig repository](https://github.com/kubernetes/community/blob/master/kubernetes-repositories.md#sig-repositories): [aws-encryption-provider](https://github.com/kubernetes-sigs/aws-encryption-provider) ~ encryption at rest in etcd
+ * Justin SB is now a Googler
+ * CP breakout is blocked by non-technical issues
+ * _From Micah Hausler (EKS) to Everyone: (10:29 AM): Small correction: We are actively working on the CP breakout here at AWS (we've had a ad-hoc community-based meeting to get it going) - [meeting notes](https://docs.google.com/document/d/1-i0xQidlXnFEP9fXHWkBxqySkXwJnrGJP9OGyP2_P14/edit#heading=h.dbsrync38vdv)_
+ * Need help working on this
+ * **GCP **(Adam Worrell - confirmed) ([bit.ly/k8s-sig-gcp](http://bit.ly/k8s-sig-gcp))
+ * Not thriving, 3 meetings total but having a lack of topics
+ * There's only one lead, but someone has expressed interest
+ * Organizationally important, but there don't seem to be externally-interested parties
+ * There are lurkers, but not a specific community
+ * Community, please use this opportunity
+* [0:00] **Announcements**
+ * <span style="text-decoration:underline;">Shoutouts!</span>
+ * Join #shoutouts to add yours to the weekly announcements
+ * @maciekpytel for providing some nuance and clarity around node autoscaler
+ * @cblecker for fielding so many issues and PRs.
+ * <span style="text-decoration:underline;">Help Wanted?</span>
+ * SIG UI is looking for more active contributors to revitalize the dashboard. Please join their [communication channels](https://github.com/kubernetes/community/blob/master/sig-ui/README.md) and attend the next meeting to announce your interest.
+ * <span style="text-decoration:underline;">KubeCon EU Update</span>
+        * Current contributor track session voting will be emailed to attendees today!
+ * RSVP for Contributor Summit [[here]](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit)
+ * SIG Leads, please do your updates for the 5 minute updates
+ * CNCF meet the maintainers group is organizing ~ please sign up for attending the CNCF booth
+
+
+## April 12, 2018- ([recording](https://www.youtube.com/watch?v=1wTmoXPfspI))
+
+
+
+* **Moderators**: Josh Berkus [SIG-Release]
+* **Note Taker**: Clint Kitson [VMware]
+* 69+ participants
+* [ 10:00 ]** Demo **-- CRI-O [Antonio Murdaca, runcom@redhat.com] (confirmed)
+ * [https://github.com/kubernetes-incubator/cri-o](https://github.com/kubernetes-incubator/cri-o)
+ * Support for k8s 1.9/1.10 and tracking changes consistently
+ * Planned support for clearcontainers/kata-containers
+ * Demo on K8s 1.10
+ * Support for kubeadm and minikube
+ * Create issues on crio project on github
+ * sig-node does not have plans to choose one yet
+ * Working on conformance to address implementations which should lead to choosing default implementation
+ * Choice is important since it would be used under scalability testing
+ * Test data? Plan to publish results to testgrid, will supply results ASAP
+ * Previously blocked on dashboard issue
+ * Can get to point to make crio a blocking job-- multiple releases at this status before graduation
+ * Request for containerd update to group -> contribex
+* [ 10:18 ]** Release Updates**
+ * 1.11 [Josh Berkus ~ Release Lead] (confirmed)
+ * Week 2 of 12 (see [schedule](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release-1.11.md))
+ * Currently collecting features [Ihor]
+ * Please, work on adding/updating the features in the [features](https://github.com/kubernetes/features/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.11) repo.
+ * Please, also add them to the features [tracking spreadsheet](https://docs.google.com/spreadsheets/d/16N9KSlxWwxUA2gV6jvuW9N8tPRHzNhu1-RYY4Y0RZLs/edit?ouid=103000293055760527954&usp=sheets_home&ths=true).
+ * Discussion about changing the Freeze/RC schedule
+ * Been difficult to get all tests passing, so code freezes have been lengthening
+ * Less time to work on development
+ * In 1.11 trying to shorten code freeze conditionally
+                * If all tests are passing during the last 3 week days, code freeze will be made shorter to enable more development time
+ * All 3 dashboards (1.11-blocking, master-blocking, master-upgrade in [testgrid](https://k8s-testgrid.appspot.com/))
+ * 1.11 dashboards don't exist until 7 days prior
+ * Sending out update to k8s-dev 4/13
+ * Could get freeze to 7 working days (goal)
+ * Up to release lead if this works out
+ * Questions to sig-release mailing list/slack
+ * Patch Release Updates
+ * 1.10.1 [Maciek Pytel](confirmed)
+ * Issues with flakey tests, triaged, reviewing and hopefully green by 4/13
+ * 1.9.
+ * No info
+* [ 10:24 ] **Graph o' the Week **[Aaron Crickenberger](confirmed)
+ * devstats.k8s.io - PRs labels repository groups
+ * [https://k8s.devstats.cncf.io/d/47/prs-labels-repository-groups?orgId=1](https://k8s.devstats.cncf.io/d/47/prs-labels-repository-groups?orgId=1)
+ * needs-rebase - trend over time shows abandoned pull requests
+ * [https://github.com/kubernetes/test-infra/tree/master/label_sync](https://github.com/kubernetes/test-infra/tree/master/label_sync)
+ * need-ok-to-test - shows pull requests need help to get through process
+ * Members don't get this tag
+ * [https://github.com/kubernetes/community/blob/master/community-membership.md](https://github.com/kubernetes/community/blob/master/community-membership.md)
+ * Requirements have changed - Demonstrate intent and dedication to the project
+ * Number of PR's that have a given label applied over time
+ * [http://not.oktotest.com](http://not.oktotest.com)
+ * need-rebase
+* [ 10:32 ] **SIG Updates**
+ * SIG-VMware [Steve Wong](confirmed)
+ * 1st meeting, charter defined, meeting notes are published and shared
+ * 31 google group members, 11 people on first meeting, 50 slack members
+ * Support kubernetes users who are deploying at scale on VMware platforms, support development relating to cloud providers
+ * Working on aligning cloud provider to cloud provider WG strategy
+ * SIG-Windows [Michael Michael](confirmed)
+ * Busy since 1.9 on getting people using and deploying windows containers
+ * Been fixing bugs
+ * Added support
+ * Resource controls
+ * File system stats
+ * Flexv
+ * Hyper-v isolation (experimental), similar to kata
+ * e2e automation and tests for sig-windows (eta 1 month)
+ * Hoping for GA around Windows Server 2019
+ * Join on slack or directly
+ * On-Prem announcement (jb)
+ * Demoted to working group
+ * Forum for discussion for people with on-prem deployments
+ * Possibly formally recognized group in future (waiting decision from steering committee)
+ * Committee is clarifying what a WG is, why, rules
+ * Also clarifying a sub-project
+ * Doesn't have formal meetings/structure yet
+* [ 10:40 ] **Announcements**
+ * Shoutouts this week:
+        * @robinpercy @mike.splain @rdrgmnzs for graduating the mentoring program. @paris for leading it.
+ * @maciekpytel for "providing clarity" around the node autoscaler
+ * We're going to disable the cherrypick-auto-approve munger [aaron]
+ * Process now is based on cherrypick-approve
+ * [kubernetes-dev@ thread](https://groups.google.com/d/msg/kubernetes-dev/Br2-4pQPOIs/YbSM1YNIBgAJ)
+ * SC update: subprojects and SIG charters [briangrant]
+ * Help SIGs create charters
+ * Explain to SIGs what sub-projects are and how they can be used
+ * Split SIGs among steering committee members
+ * 6 charters in flight working on charter, then going to other SIGs
+ * [r/kubernetes: Ask Me Anything](https://www.reddit.com/r/kubernetes/comments/8b7f0x/we_are_kubernetes_developers_ask_us_anything/) - thanks everyone for participating, lots of user feedback, please have a look.
+ * We'll likely do more of these in the future.
+ * [Kubernetes Contributor Summit @ Kubecon](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit) - May 1 (jb)
+ * You need to register for this even if you already registered for Kubecon! Link to the form in the link above.
+ * New contributor/on-going contrib in morning and general tracks in afternoon
+ * New CNCF Interactive Landscape: [https://landscape.cncf.io/](https://landscape.cncf.io/) (dan kohn)
+
+
+## April 5, 2018 - ([recording](https://www.youtube.com/watch?v=z1vLGqNAJuA))
+
+
+
+* **Moderators**: Jose Palafox [ContribEx]
+* **Note Taker**: Solly Ross
+* **Demo **
+ * Artifactory in Kubernetes - Jainish Shah ([jainishs@jfrog.com](mailto:jainishs@jfrog.com)), Craig Peters ([craigp@jfrog.com](mailto:craigp@jfrog.com))
+ * Artifactory is a universal repository/artifact manager (e.g. docker registry, helm repositories)
+ * Can be deployed via Helm ([https://hub.kubeapps.com/charts/stable/artifactory](https://hub.kubeapps.com/charts/stable/artifactory))
+ * Can proxy/mirror/cache upstream repositories, and store artifacts itself
+ * Demo shows:
+ * creating docker registry and helm repos, pushing helm chart
+ * CLI and web UI
+ * Caching upstream repositories
+ * Walkthrough and Example: [https://jfrog.com/blog/control-your-kubernetes-voyage-with-artifactory/](https://jfrog.com/blog/control-your-kubernetes-voyage-with-artifactory/) & [https://github.com/jfrogtraining/kubernetes_example](https://github.com/jfrogtraining/kubernetes_example)
+ * Questions
+ * Difference between commercial and free (and what's the cost)
+ * Free only has maven support, is open source, commercial supports everything (including Kubernetes-related technologies, like Helm)
+ * Is HTTP basic auth the default
+ * Yes, but other auth schemes are supported
+ * Use of the API key in the jfrog cli documented [https://www.jfrog.com/confluence/display/CLI/CLI+for+JFrog+Artifactory](https://www.jfrog.com/confluence/display/CLI/CLI+for+JFrog+Artifactory)
+* **Release Team**
+ * 1.11 [Josh Berkus, Release Lead]
+ * We are in Week 1
+ * [Release team roles](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release_team.md) almost filled
+ * Still need a docs shadow (ask in #sig-release or #sig-docs if interested)
+ * [Schedule posted](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.11/release-1.11.md) (last chance to raise issues is _right now_)
+ * Feature Freeze: April 24
+ * Code slush: May 22
+ * Code Freeze: May 28
+ * Doc Deadline: June 11
+ * Release: June 26 (yes, we know it's right before July 4th, there's not a good way around it)
+ * Feature Gathering has started
+ * [Tracking Sheet](https://docs.google.com/spreadsheets/d/16N9KSlxWwxUA2gV6jvuW9N8tPRHzNhu1-RYY4Y0RZLs/edit#gid=0)
+ * **File your feature repo issues now!**
+ * 1.10 [Maciek Pytel, patch manager]
+ * 1.10.1 on April 12th (next Thursday)
+ * Contains kubectl fixes
+ * Will send email with release notes and pending PRs later today
+ * Contact Maciek if you need to cherry-pick anything not in the email
+ * 1.9
+* **Graph o' the Week **(Josh Berkus)
+ * PR Workload: shows weighted workload for each SIG in the form of PRs. Primarily there to check up on which SIGs are heavily loaded.
+ * Includes ONLY PRs against kubernetes/kubernetes because other repos don't use SIG labels consistently.
+ * "Absolute Workload" == PR count * PR Size
+ * "Relative Workload" == (PR Count * PR Size) / Number of Reviewers
+ * [Calculation in issue (docs later)](https://github.com/cncf/devstats/issues/68)
+ * PRs that were *open* during that period, not PRs that were created during the period. Old PRs are important for workload.
+ * Chart:
+ * [All PRs, 6 months](https://k8s.devstats.cncf.io/d/33/pr-workload?orgId=1)
+ * [SIG-Scalability And Autoscaling last 3 months](https://k8s.devstats.cncf.io/d/33/pr-workload?orgId=1&from=now-90d&to=now&var-sigs=autoscaling&var-sigs=scalability&var-full_name=Kubernetes)
+ * [SIG-Node last 3 months](https://k8s.devstats.cncf.io/d/33/pr-workload?orgId=1&from=now-90d&to=now&var-sigs=node&var-full_name=Kubernetes)
+ * Table:
+ * [Workloads for last month](https://k8s.devstats.cncf.io/d/34/pr-workload-table?orgId=1)
+ * [Workload for version 1.9](https://k8s.devstats.cncf.io/d/34/pr-workload-table?orgId=1&var-period_name=v1.8.0%20-%20v1.9.0&var-period=anno_28_29)
+ * [Workload for version 1.10](https://k8s.devstats.cncf.io/d/34/pr-workload-table?orgId=1&var-period_name=v1.9.0%20-%20v1.10.0&var-period=anno_29_30)
+ * Questions
+ * Assumes proper labeling
+ * Yes, things without sig labels aren't included
+ * Size labels have a lot of fudge factor
+ * Don't compare small increments ("10% more PRs than last week") but compare heavily loaded/lightly loaded ("is SIG-API overwhelmed for 1.11?")
+ * Overall graph takeaway?
+ * Determine if one SIG has a bunch of PRs suddenly, or does everyone have a bunch of PRs due to a particular place in the release cycle
+ * What do we do based on these charts?
+ * We're still exploring, seeing what people think, are these helpful to people?
+ * Join #devstats if you're interested in collaborating, discussing
+ * Need to find charts that are actually useful vs just shiny
+* **SIG Updates**
+ * SIG Cluster Ops - Rob H.
+ * Updated Mission - more focused on building operator community
+ * We have been having trouble with quorum and need some help
+        * Chris McEniry and I believe strongly in the need for a vendor-neutral place for operators to gather around K8s. Neither of us are vendors, so we're good neutral hosts, but we need help getting speakers.
+ * We'd be happy to host long format demos by vendors.
+ * Questions:
+ * Consolidate with OnPrem?
+ * Sure!
+ * It's good to consolidate SIGs if it's relevant
+ * SIG Docs - Zach
+ * 2 new maintainers, 5 new contributors
+ * Coming soon: improved contribution guidelines: https://kubernetes.io/editdocs/
+ * Migrating the Kubernetes website from Jekyll to Hugo: we've met with the contractor, gotten an initial estimate, and are proceeding with a target completion date of April 30
+ * Blog migration: Formerly at blog.kubernetes.io (Blogger), the Kubernetes blog now resides at kubernetes.io/blog (GitHub). The main reason for migrating was to resolve increasing technical debt and make life easier on the blog contribution team.
+        * Thanks to test-infra for the automation that makes it possible to have blog-level ownership of PRs and approvals!
+ * Questions:
+ * Tidying up contributor guidelines just for SIG docs, or Kubernetes in general?
+ * Kubernetes in general
+ * Collaborate with ContribEx
+ * Have blog posts pending review been migrated to GitHub, or do they need to be manually migrated?
+ * They will be migrated, with blog manager opening PRs as needed
+ * SIG Service Catalog - bumped to 5/24
+* **Announcements**
+ * [Kubernetes Contributor Summit @ Kubecon](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit) - May 1 [Jorge Castro]
+ * You need to register for this even if you already registered for Kubecon! Link to the form in the link above.
+ * Current contributor track voting on topics will be emailed to attendees Monday
+ * Reddit r/kubernetes AMA [Jorge Castro]
+ * This next Tuesday: [https://www.reddit.com/r/kubernetes/comments/89gdv0/kubernetes_ama_will_be_on_10_april_tuesday/](https://www.reddit.com/r/kubernetes/comments/89gdv0/kubernetes_ama_will_be_on_10_april_tuesday/)
+ * If you're a reddit user please contact [jorge@heptio.com](mailto:jorge@heptio.com) so we can coordinate.
+ * Roadmaps call - SIG-PM is asking for the roadmap input [https://groups.google.com/forum/#!topic/kubernetes-pm/-jW3bHUbfE8](https://groups.google.com/forum/#!topic/kubernetes-pm/-jW3bHUbfE8) [Ihor/Jaice]
+ * We're trying to assess if there are any long-term, cross-cutting views of the project, or if our planning horizon is only the length of features in process/one release
+ * We want to provide more transparency to the end-user community about what planning exists
+ * SIG-PM can also help facilitate planning, as in this [template](https://docs.google.com/document/d/1qi4LKV3W9B5JJ5JLjmAY33jESYAqWMWFyO5bWoYEBDo/edit)
+ * [https://go.k8s.io/github-labels](https://go.k8s.io/github-labels) [Aaron C]
+ * List of all labels created or consumed by automation, as well as their meanings
+ * Lists instructions on how to contribute new labels
+ * [Kubernetes Application Survey](https://goo.gl/forms/ht61kKETiqVR103v1) [Aaron C]
+ * From [wg-app-def](https://github.com/kubernetes/community/tree/master/wg-app-def)
+        * How do you build and operate applications on Kubernetes?
+ * Results will be made publicly available
+ * Take the survey, share it with others
+ * Due April 16th
+ * [http://k8s-code.appspot.com/](http://k8s-code.appspot.com/) (Dims)
+ * Search engine for all github code repositories under kubernetes main org
+* **Shoutouts**
+ * Thanks to our contributors that joined #meet-our-contributors yesterday for questions! @gsaenger @spiffxp @chrislovecnm @spzala @carolynvs
+ * [Once a month livestream on-demand upstream mentors! Join us!](https://github.com/kubernetes/community/blob/master/mentoring/meet-our-contributors.md)
+ * Considering twice a month - need help with running more! Contact paris@ on slack or parispittman@google.com
+
+
+## March 29, 2018
+
+
+
+* **Moderators**: Paris Pittman [ContribEx]
+* **Note Taker**: Solly Ross
+* [ 0:00 ]** Demo - **KubeAdmin Self-Config - Rob Hirschfeld ([rob@rackn.com](mailto:rob@rackn.com))
+    * Uses Digital Rebar and kubeadm to stand up clusters from scratch with no intervention
+    * Digital Rebar does PXE provisioning to get the machines stood up, and then hands off to kubeadm for installing kubernetes
+    * Workflow to do the setup steps, install Docker etc, elect master, do hand off to kubeadm
+ * Generates join keys for kubeadm
+ * Sends information like master election, cluster admin config file, etc back to shared data set
+ * Resources:
+ * Kubecon Presentation [https://www.slideshare.net/rhirschfeld/kubecon-2017-zero-touch-kubernetes](https://www.slideshare.net/rhirschfeld/kubecon-2017-zero-touch-kubernetes)
+ * Longer Demo Video [https://www.youtube.com/watch?v=OMm6Oz1NF6I](https://www.youtube.com/watch?v=OMm6Oz1NF6I)
+ * Digital Rebar:[https://github.com/digitalrebar/provision](https://github.com/digitalrebar/provision),
+ * Project Site: [http://rebar.digital](http://rebar.digital)
+ * Terraform Provider (referenced at end) [https://github.com/rackn/terraform-provider-drp](https://github.com/rackn/terraform-provider-drp)
+ * Questions:
+ * Q: Could we drive digital rebar from Kubicorn?
+ * A: Yes, probably
+* [ 0:00 ] **Announcements**
+ * [Meet Our Contributors is Apr 4th @ 330p and 9p UTC](https://github.com/kubernetes/community/blob/master/mentoring/meet-our-contributors.md)
+ * Like office hours, but contributor question focused
+ * Looking for contributors to answer questions, 2 slots
+ * Reach out to @paris on Slack if you're interested in participating
+ * Contributor Summit in Copenhagen May 1 - [registration](https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/co-located-events/kubernetes-contributor-summit/) is live
+ * KubeCon Copenhagen (May 2-4) is **on track to sell out**. [Register](https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/)
+ * Shoutouts this week (from #shoutouts in slack):
+ * @nabrahams who picked the 1.10 release notes as his first contribution. We literally could not have done this without him!
+* [ 0:15 ]** Kubernetes 1.10 Release Retrospective**
+ * Retro [doc](https://docs.google.com/document/d/1kZnDqR0rZ4Zj_D9WWdD5JIoF9dZdZRr0giIU0w32bqI/edit#)
+ * This is how we improve
+ * Prior Release Retrospectives [ [1.3](http://bit.ly/kube13retro), [1.4](http://bit.ly/kube14retro), [1.5](http://bit.ly/kube15retro), [1.6](http://bit.ly/kube16retro), [1.7](http://bit.ly/kube17retro), [1.8](http://bit.ly/kube18retro), [1.9](http://bit.ly/kube19retro) ]
+
+
+## March 22, 2018 - recording
+
+
+
+* **Moderators**: Chris Short [Contribex]
+* **Note Taker**: Sanket Patel [Egen Solutions]
+* [ 0:00 ]** Demo **-- 03/22: Ark - a backup/disaster recovery tool for k8s - Andy Goldstein ([andy@heptio.com](mailto:andy@heptio.com))
+ * [https://github.com/heptio/ark](https://github.com/heptio/ark)
+ * "Heptio Ark is a utility for managing disaster recovery, specifically for your Kubernetes cluster resources and persistent volumes."
+ * Looking for help on code and documentation
+ * Link to slides
+* [ 0:00 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release lead]
+ * Code thaw happened
+ * rc1 is out, please take a look!
+ * release due Monday 3/26 ~ 6PM Pacific time
+ * release retrospective the last 45 minutes of the community meeting next week
+ * You can see our progress in [videos](https://www.youtube.com/watch?v=e14tlUBd2jQ&list=PL69nYSiGNLP3QKkOsDsO6A0Y1rhgP84iZ) and in the burndown meeting [notes](http://bit.ly/k8s110-burndown)
+ * We're looking for a Release Lead for 1.11, code name "Everything Croissant" - read the role description and requirements [here](https://github.com/kubernetes/sig-release/blob/master/release-process-documentation/release-team-guides/release-lead.md), and contact @jdumars in Slack if you are interested in knowing more
+ * Josh Berkus has volunteered, but the more the merrier!
+ * Release retrospective in this time slot, in 2 weeks: 3/29
+ * Patch releases out now
+ * 1.7.15
+ * 1.8.10
+ * 1.9.6
+* [ 0:00 ] **Graph o' the Week **[N/A]
+ * Back Next Week After 1.10 Release
+* [ 0:00 ] **SIG Updates**
+ * SIG Azure [Cole Mickens] (confirmed)
+ * #sig-azure on Slack
+ * Azure backlog is public
+ * cloudprovider is moving to independent repo
+ * Lots of work around Azure integrations
+ * Kal Henidak is leading Azure's upstream release efforts
+ * Many fixes in the 1.10 release
+ * More great things in the pipeline
+ * SIG Node [Derek Carr] (confirmed)
+ * New [CRI testing policy](https://github.com/kubernetes/community/blob/master/contributors/devel/cri-testing-policy.md)
+ * Feature going into Beta - Local storage capacity isolation
+ * Feature going into alpha - debug container, supports pod pid limits, cri container log rotation
+ * wg-resource-mgmt : graduated device plugins, hugepages, cpu pinning (beta)
+ * Cri-o declared stable since 1.9x
+ * Future changes
+ * Finish governance materials
+ * Feature planning for 1.11
+ * Topics explored:
+ * Secure container
+ * Virtual Kubelet
+ * Face to face meeting details on working group document
+ * Slides presented: [https://docs.google.com/presentation/d/1P267xBGQtLprbVV-XStpVt8c-um6NqBmQLEIOKhmJAs/edit?usp=sharing](https://docs.google.com/presentation/d/1P267xBGQtLprbVV-XStpVt8c-um6NqBmQLEIOKhmJAs/edit?usp=sharing)
+ * SIG Network [Casey Davenport] (confirmed)
+ * Feature alpha -> beta
+ * IPv6 network support for k8s core components and pod networking - [feature issue](https://github.com/kubernetes/features/issues/508)
+ * CI for IPv6 - PR coming soon.
+ * [Core DNS integration](https://github.com/kubernetes/community/pull/1956) replacement for kubeDNS (single binary, better performance)
+ * [Feature issue](https://github.com/kubernetes/features/issues/427)
+ * IPvs kube-proxy staying in beta - [outstanding issues](https://github.com/kubernetes/kubernetes/issues?q=is%3Aopen+is%3Aissue+label%3Aarea%2Fipvs)
+ * Ingress requirements gathering.
+ * Sent survey to ingress users.
+ * Have [results from survey](https://github.com/bowei/k8s-ingress-survey-2018) and starting interpretation
+ * Network plumbing group discussing adding networking interfaces for pods - [specification doc](https://docs.google.com/document/d/1Ny03h6IDVy_e_vmElOqR7UdTPAG_RNydhVE1Kx54kFQ/edit)
+ * Traffic shaping moved to a CNI plugin - [proposal](https://github.com/kubernetes/community/pull/1893)
+ * Designing service routing to be topology aware - [proposal](https://github.com/kubernetes/community/pull/1551)
+* [ 0:00 ] **Announcements**
+ * Shoutouts this week:
+ * Vlad Ionescu: Shoutout to @yomateo and @foxie for how much they are helping in #kubernetes-users! Matthew has a lot of examples on his Github and Ashley is amazing regarding on-prem and general k8s questions( learned so much by reading their answers).
+ * Jaice DuMars: I know I keep saying it, but @cblecker @dims @liggitt ~ The POWER TRIO of release-assist awesomeness!! THANK YOU!!
+ * Josh Berkus: Shout out to Pavel Pospisil, for pitching in to close a PVC blocker bug despite being between jobs this week and not having his normal dev machine.
+ * Josh Berkus: Shoutout to Klaus Ma for leading the closure of several storage bugs, including staying up until 1am his time to attend meetings
+ * Jordan Liggitt: Shoutout to the branch managers: @wojtekt, @jpbetz, @mbohlool for dealing with unending cherry-picks over the last couple weeks
+ * Meet Our Contributors is looking for contributors to come on Apr 4th at 330 and 9p UTC
+ * Join the slack channel (same name)
+
+
+## March 15, 2018- ([recording](https://www.youtube.com/watch?v=tvP_HPFteKI))
+
+
+
+* Moderators: Solly Ross [SIG]
+* Note Taker: Kris Nova
+* [ 0:00 ] Demo -- Kuberhealthy presentation (sandbox project applicant) - Eric Greer (eric.greer@comcast.com)
+ * [https://docs.google.com/presentation/d/1tL80i7VTBUlDs5KXy7TBZcHVX45lFST2cmmE3oyjYWc/edit?usp=sharing](https://docs.google.com/presentation/d/1tL80i7VTBUlDs5KXy7TBZcHVX45lFST2cmmE3oyjYWc/edit?usp=sharing)
+ * Repository TBD (may be open sourced via corporation or go direct to sandbox if applicable)
+ * Tool for checking cluster health by standing up pods on each node, checking status of components in kube-system, etc
+ * Designed to check cluster health beyond normal logs, monitoring, metrics -- make sure you can actually run applications
+ * Currently deployed internally in production, but should generally be considered alpha at the moment
+* [ 0:00 ] Release Updates
+ * 1.10 Update [Jaice Singer DuMars ~ Release lead]
+ * Release team is in a meeting right meow
+ * Due to the security releases, and some scalability testing issues we decided to push the release from 3/21 to 3/26, and lift code freeze by EOD Monday assuming all goes to plan
+ * You can see our progress in [videos](https://www.youtube.com/watch?v=e14tlUBd2jQ&list=PL69nYSiGNLP3QKkOsDsO6A0Y1rhgP84iZ) and in the burndown meeting [notes](http://bit.ly/k8s110-burndown)
+ * We're looking for a Release Lead for 1.11, code name "Everything Croissant" - read the role description and requirements [here](https://github.com/kubernetes/sig-release/blob/master/release-process-documentation/release-team-guides/release-lead.md), and contact @jdumars in Slack if you are interested in knowing more
+ * Release retrospective in this time slot, in 2 weeks: 3/29
+ * Current Release cycle (e.g. 1.7) [First Last ~ Release role]
+ * Prior Release cycle (e.g. 1.6.6) [First Last ~ Release role]
+* [ 0:00 ] Graph o' the Week
+ * No graph this week, tune in next time!
+* [ 0:00 ] SIG Updates
+ * SIG Auth [Eric Chiang]
+ * Notes: [[link]](https://docs.google.com/document/d/1wyOkDwRDQetjTBeaPbJfkt1M8f_q3nxN5ta5v_OTfzA/edit)
+ * Overview
+ * Betas: PodSecurityPolicy, Auditing
+ * Alphas: TokenRequest API, client-go external credential providers, encryption-at-rest external KMS integration
+ * Considering Bug Bounty for Kubernetes
+ * SIG Instrumentation [Piotr Szczesniak]
+ * Introduced external metrics API
+ * Metrics will come from other systems other than k8s
+ * Integrates with Prometheus and other monitoring systems with adapter
+ * Graduated metrics API to beta
+ * Inspired by heapster
+ * By default applied to all kubernetes clusters
+ * Started discussion around securing instrumentation endpoint
+ * Cross SIG effort between sig-auth and sig-instrumentation
+ * Master metrics API, Custom metrics API
+ * Plans to graduate these to GA
+ * Deprecate Heapster
+ * Plans in the works for a historical metrics API
+ * Agreement to have logging on architecture and vision
+ * Similar to metrics architecture, which was a foundation for many design decisions
+ * Discussion on exposing kubelet health status
+ * Useful for monitoring the state of Kubelet
+ * Different than the metrics endpoint
+ * Still figuring out how to do this
+ * Would like to move sig-instrumentation projects to a new home
+* [ 0:00 ] Announcements
+ * Registration for the Contributor Summit is now live:
+ * See [this page](https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/co-located-events/kubernetes-contributor-summit/) for details
+ * Please register if you're planning on attending, we need this so we have the correct amount of food!
+ * Just registering for Kubecon is not enough!
+ * [Office Hours Next Week!](https://github.com/kubernetes/community/blob/master/events/office-hours.md)
+ * Volunteer developers needed to answer questions
+ * [Helm Summit Videos](https://www.youtube.com/playlist?list=PL69nYSiGNLP3PlhEKrGA0oN4eY8c4oaAH&disable_polymer=true) are up.
+ * Shoutouts this week
+ * Someone doing great work out there that you'd like to highlight? Let us know in #shoutout on slack and we'll mention them here:
+ * [@shyamjvs](https://github.com/shyamjvs) - diagnosing 2 critical performance problems which could have blocked the 1.10 release
+        * [@verult](https://github.com/verult) - taking on most of the open storage issues and resolving them as quickly as possible
+ * Quang Huynh for all the shiny new looks he's given prow.k8s.io during his internship!
+ * Jaice wants to say "Release mvps: [@dims](https://github.com/dims) [@cblecker](https://github.com/cblecker)"
+ * Thanks to the ever-helpful Andrew Chen ([@chenopis](https://github.com/chenopis)) for getting me through merge conflicts and branch merges in preparation for the 1.10 release. (From Jennifer Rondeau)
+ * Stefan Schimanski would like to thank:
+ * Nick Chase ([@nickchase](https://github.com/nickchase)) - for editing down 92 pages of release notes into something consumable by humans!
+ * Mik Vyatskov ([@crassirostris](https://github.com/crassirostris)) - for doing an awesome job driving and owning the auditing feature
+ * Josh Berkus would like to thank Jordan Liggitt ([@liggitt](https://github.com/liggitt))
+ * And finally congratulations to Brad Topol ([@bradtopol](https://github.com/bradtopol)) for joining the maintainer team on docs.
+
+
+## March 8, 2018 - ([recording](https://youtu.be/fySqkBQnJ8I))
+
+
+
+* **Moderators**: Jorge Castro [SIG Contrib Ex]
+* **Note Taker**: This could be you! [Company/SIG]
+* [ 0:00 ]** Demo **-- KubeFlow - Jeremy Lewi ([jlewi@google.com](mailto:jlewi@google.com))
+ * [Link to slides](https://docs.google.com/presentation/d/1p82_DKJmIPjFS69EJ8p4StCClJW-JjGgxSWfhzw7Abw/edit#slide=id.g30f6ce7d33_0_378)
+ * [Kubeflow Repo](https://github.com/kubeflow/kubeflow)
+* [ 0:00 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release lead]
+ * **Week 10 of 12**, the full schedule and some important information is [here](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md)
+ * Release status is yellow, which means there's a chance our release date might slip by a small margin of days ~ we're working on sorting this out and should know more by early next week
+ * Next week, we will be entering [crunch](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md#code-slush) week for the release, so majority of release activities like documentation and release notes are completed, as well as drafts for blog posts, and FAQs for media.
+ * To meet our release date, the v1.10 branch must be in very good shape by the end of next week, with fixes either en route, or in queue.
+ * **Code freeze will end on March 14th at 6PM Pacific time**, after which any additions to the 1.10 release will need to be cherry picked, in close collaboration with the release team.
+ * Release team meetings will move to the daily cadence next week. If you join either the [SIG-Release](https://groups.google.com/forum/#!forum/kubernetes-sig-release) or [kubernetes-milestone-burndown](https://groups.google.com/forum/#!forum/kubernetes-milestone-burndown) groups, you should get an invite.
+* [ 0:00 ] **Graph o' the Week **Zach Corleissen, SIG Docs
+ * Weekly update on data from devstats.k8s.io
+ * [https://k8s.devstats.cncf.io/d/44/time-metrics?orgId=1&var-period=w&var-repogroup_name=Docs&var-repogroup=docs&var-apichange=All&var-size_name=All&var-size=all&var-full_name=Kubernetes](https://k8s.devstats.cncf.io/d/44/time-metrics?orgId=1&var-period=w&var-repogroup_name=Docs&var-repogroup=docs&var-apichange=All&var-size_name=All&var-size=all&var-full_name=Kubernetes)
+ * Docs folks had vague anxiety (without concrete data) on their response times for issues and PRs. Devstats shows less than approx. 4 days initial response times during the last year, outside of a few spikes associated with holidays on the calendar and KubeCon.
+ * Introduction of prow into kubernetes/website led to a demonstrable improvement in early 2018
+* [ 0:00 ] **SIG Updates**
+ * SIG Apps [Adnan Abdulhussein] (confirmed)
+ * [https://docs.google.com/presentation/d/1yTM5bi4C2cr_L-Ow1G2W934-vXS3r_z-PayDAaKx73o/edit?usp=sharing](https://docs.google.com/presentation/d/1yTM5bi4C2cr_L-Ow1G2W934-vXS3r_z-PayDAaKx73o/edit?usp=sharing)
+ * SIG OpenStack [Chris Hoge] (confirmed)
+ * [https://docs.google.com/presentation/d/1DtBKFlPhb74v9bXN6-RSNpLagh2wbDs3JJuno7IzgSw/edit?usp=sharing](https://docs.google.com/presentation/d/1DtBKFlPhb74v9bXN6-RSNpLagh2wbDs3JJuno7IzgSw/edit?usp=sharing)
+ * SIG UI [Sebastian Floreks] (sends regrets)
+ * Due to reasons independent from us, maciaszczykm and I have been moved from full-time Dashboard contribution to another project. We will be less active now, but still want to finish the migration and try to be a part of Dashboard project.
+ * We are working only on migration from AngularJS to Angular 5. Around 70% of features have been rewritten. Unfortunately, due to mentioned reasons we can not provide any ETA regarding the end of migration process. Progress and changes can be tracked from: [https://github.com/kubernetes/dashboard/pull/2727](https://github.com/kubernetes/dashboard/pull/2727)
+ * Dashboard is on a soft code freeze until migration is finished. Only critical bugs will be fixed during this time.
+ * (See notes from the last few meetings): [https://github.com/kubernetes/community/tree/master/sig-ui](https://github.com/kubernetes/community/tree/master/sig-ui)
+* [ 0:00 ] **Announcements**
+ * SIG Charter and Subprojects Update [pwittrock]
+ * [SIG Governance Charter Templates](https://github.com/kubernetes/community/blob/master/committee-steering/governance/README.md)
+ * At least one more detailed template under development
+ * [Governance.md updated with subprojects](https://github.com/kubernetes/community/blob/master/governance.md#subprojects)
+ * [WIP: Subproject Meta](https://docs.google.com/document/d/1FHauGII5LNVM-dZcNfzYZ-6WRs9RoPctQ4bw5dczrkk/edit#heading=h.2nslsje41be1)
+ * [WIP: Charter FAQ (the "Why"s)](https://github.com/kubernetes/community/pull/1908)
+ * Reminder: [Contributor Summit](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit), 1 May, day before Kubecon
+ * CNCF would like feedback on the draft blog post for 1.10 beta:
+ * [http://blog.kubernetes.io/2018/03/first-beta-version-of-kubernetes-1-10.html](http://blog.kubernetes.io/2018/03/first-beta-version-of-kubernetes-1-10.html)
+ * Please contact [Natasha Woods](mailto:nwoods@linuxfoundation.org) with your feedback
+ * Shoutouts this week
+ * See someone doing something great for the community? Mention them in #shoutouts on slack.
+ * Maru Newby (@marun) for [https://github.com/kubernetes/test-infra/pull/7083](https://github.com/kubernetes/test-infra/pull/7083) also Cole Wagner (@cjwagner) and Benjamin Elder (@bentheelder) who have all been super helpful getting this release moving forward.
+ * Meet our Contributors (1st weds of the month - AMA kubernetes.io/community for cal invite) - Aaron Crickenberger (@spiffxp), Davanum Srinivas (@dims), Ilya Dmitrichenko (@errordeveloper), Jennifer Rondeau ( @jrondeau), Kris Nova (@kris-nova), Solly Ross (@directxman12), Jeff Grafton (@ixdy) and Jorge Castro (@jorge)
+
+
+## March 1, 2018 - ([recording](https://youtu.be/mpfqSBcdSHI))
+
+
+
+* **Moderators**: Solly Ross [SIG]
+* **Note Taker**: Chris Short - chris@chrisshort.net
+* [ 0:00 ]** Demo **-- Sonobuoy - diagnostic tool for k8s - Chuck Ha ([chuck@heptio.com](mailto:chuck@heptio.com))
+ * [Link to slides](https://docs.google.com/presentation/d/1aiCdN5RY-mCZdqav5RuVpJfgzW9UGo1-Zlq3aITZ830/edit#slide=id.g3378780fb7_0_65)
+ * [Link to repository](https://github.com/heptio/sonobuoy)
+ * #sonobuoy on the k8s slack
+ * "We love feedback"
+* [ 0:00 ]** Release Updates**
+ * 1.10 Release Update [Jaice Singer DuMars ~ Release lead]
+ * Week 9 of 12, full schedule and some important information is [here](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md)
+ * We're in code freeze
+ * Why do we have code freeze?
+ * Provides opportunity for SIGs to focus on bugs/cleanup
+ * Allows time for technical debt elimination while we have a quiet submit queue, e.g. [this](https://groups.google.com/d/msg/kubernetes-dev/UFCzs-Zjj9E/b3_qjj71AwAJ)
+ *
+ * How does it work?
+ * Isolate release-relevant items (issues and PRs) by use of the <strong><code>status/approved-for-milestone</code></strong> label
+ * If it's in the milestone with that label, we pay close attention, otherwise it is assumed not impacting the release
+ * Bot will nag you about labels so we can focus on what's really important
+ * Check out that [beta](https://github.com/kubernetes/kubernetes/releases/tag/v1.10.0-beta.1), <strong>please</strong>
+ * Collecting [known issues](https://github.com/kubernetes/kubernetes/issues/59764) in a single place so we can properly document it as part of the release notes
+ * Release notes and user-facing documentation should be close to complete ~ <strong>PRs for docs need to be ready for review by tomorrow (6 PM PT unless otherwise stated)</strong>
+ * 1.8.9 should be out today
+ * 1.7.13 is out
+* [ 0:00 ] <strong>Graph o' the Week </strong>[Josh Berkus]
+ * Weekly update on data from devstats.k8s.io
+ * [Issues and PRs by Milestone](https://k8s.devstats.cncf.io/d/IIUa5kezk/open-issues-prs-by-milestone?orgId=1&from=now-7d&to=now&var-sig_name=All&var-sig=all&var-milestone_name=v1.10&var-milestone=v1_10&var-repo_name=kubernetes%2Fkubernetes&var-repo=kubernetes_kubernetes&var-full_name=Kubernetes)
+ * Allows us to compare workload/readiness with prior releases:
+ * [1.10 at code freeze](https://k8s.devstats.cncf.io/d/IIUa5kezk/open-issues-prs-by-milestone?orgId=1&from=1517101031268&to=1519779431269&var-sig_name=All&var-sig=all&var-milestone_name=v1.10&var-milestone=v1_10&var-repo_name=kubernetes%2Fkubernetes&var-repo=kubernetes_kubernetes&var-full_name=Kubernetes)
+ * [1.9 at code freeze](https://k8s.devstats.cncf.io/d/IIUa5kezk/open-issues-prs-by-milestone?orgId=1&from=1509407831268&to=1511830631269&var-sig_name=All&var-sig=all&var-milestone_name=v1.9&var-milestone=v1_9&var-repo_name=kubernetes%2Fkubernetes&var-repo=kubernetes_kubernetes&var-full_name=Kubernetes)
+ * [1.8 at code freeze](https://k8s.devstats.cncf.io/d/IIUa5kezk/open-issues-prs-by-milestone?orgId=1&from=1501804631268&to=1504483031269&var-sig_name=All&var-sig=all&var-milestone_name=v1.8&var-milestone=v1_8&var-repo_name=kubernetes%2Fkubernetes&var-repo=kubernetes_kubernetes&var-full_name=Kubernetes)
+ * Overcounting issue due to github bug
+* [ 0:00 ] <strong>SIG Updates</strong>
+ * SIG Big Data [Anirudh Ramanathan]
+ * Acting as a bridge to external projects, sometimes work on forks, and then upstream it.
+ * Apache Spark - (now tracked in [JIRA](https://issues.apache.org/jira/browse/SPARK-23529?jql=project%20%3D%20SPARK%20AND%20component%20%3D%20Kubernetes) & [mailing lists](https://spark.apache.org/community.html))
+ * Graduated from a fork and merged back into project - released as Spark 2.3 yesterday!
+ * Top billed feature - k8s support ([link](https://spark.apache.org/releases/spark-release-2-3-0.html))
+ * 3 new ASF committers from our SIG
+ * Spark-submit with operator semantics ([WIP](https://github.com/GoogleCloudPlatform/spark-on-k8s-operator))
+ * Apache Airflow ([link](http://incubator.apache.org/projects/airflow.html))
+ * Trying to make a k8s-native DAG scheduler
+ * Currently upstreaming the k8s executor.
+ * Airflow v1.10 will be first release with k8s constructs in it.
+ * HDFS ([link](https://github.com/apache-spark-on-k8s/kubernetes-HDFS/))
+ * Hardening existing work
+ * Added HA namenode and fault tolerance to running within k8s/containers
+ * Demo coming soon in SIG Apps
+ * Success = performant and secure HDFS.
+ * Kube-arbitrator ([link](https://github.com/kubernetes-incubator/kube-arbitrator))
+ * Joint work with sig-scheduling.
+ * Ongoing discussion, MVP coming soon.
+ * SIG Storage [Saad Ali] - [Slides](https://docs.google.com/presentation/d/1VNQQ9Lzn6ahy9zHm0aE6KOzS6YzCREzNE2Jla2ESbC8/edit?usp=sharing)
+ * Primary work for 1.10 is moving alpha features to beta including: CSI, Local storage, mount propagation, volume protection, ephemeral storage, etc.
+ * In 1.11 will be working on topology-aware storage and moving beta features to GA
+ * SIG Multicluster [Christian Bell]
+ * FederationV1: Low interest in fixing open bugs; bots are auto closing issues
+ * Top-level docs need to update/reflect reality that at the current pace of development, FederationV1 will not reach the same maturity as single-cluster Kubernetes.
+ * Most work on federation has moved to a WG FederationV2
+ * Currently in "brainstorming" phase
+ * Moving away from having a completely consistent api with non-federated Kubernetes API
+ * [Cluster Registry](https://github.com/kubernetes/cluster-registry): An API for maintaining a list of clusters and associated metadata. Move to beta this quarter. Being consumed by Kubernetes (Kubemci) and non-Kubernetes projects (istio multi-cluster).
+ * [Kubemci](https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress): A command-line tool (and eventually controller) to configure ingress across multiple clusters.
+* [ 0:00 ] <strong>Announcements</strong>
+ * [Owner/Maintainer ](https://github.com/kubernetes/community/pull/1861/files)[pwittrock]
+ * Maintainer is folding into Owner
+ * Reminder: Contributor Summit happens 1 May, day before Kubecon
+ * [https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit)
+ * Kubecon price increase March 9
+ * [https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/](https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/)
+ * Copenhagen May 2-4, 2018
+ * [Meet Our Contributors is next Weds!](https://github.com/kubernetes/community/blob/master/mentoring/meet-our-contributors.md)
+ * Two times! 330p and 9p UTC
+ * Ask current contributors anything on slack #meet-our-contributors - testing infra, how to make first time contribution, how did they get involved in k8s
+ * Shoutouts!
+ * None on slack this week, thank someone in #shoutouts!
+ * Top 5 in the Kubernetes StackOverflow tag for the week: Radek "Goblin" Pieczonka, aerokite, Vikram Hosakote, Jonah Benton, and fiunchinho
+
+
+## February 22, 2018 - ([recording](https://www.youtube.com/watch?v=7pN0xdiFqPE))
+
+
+
+* **Moderators**: Jorge Castro [SIG ContribEx]
+* **Note Taker**: Jaice Singer DuMars
+* [ 0:00 ]** Demo **No demo this week
+* [ 0:01 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release lead]
+ * Week 8 of 12, full schedule and some important information is [here](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md)
+ * We're in code slush (Josh Berkus)
+ * Trying to filter out issues that are not critical to 1.10 release health
+ * SIGs must take ownership of [issues](https://github.com/kubernetes/sig-release/issues/86) with "Approved-For-Milestone" labels + Priority + type + status
+ * **CODE FREEZE ON MONDAY ~ 6PM PST**
+ * No new commits to the release-1.10 branch unless:
+ * SIG approved and tied to an existing issue
+ * Bug fixes (critical)
+ * Test flake fixes (or other release-specific code)
+ * If you have an exception, please contact @jdumars / [jdumars@gmail.com](mailto:jdumars@gmail.com)
+ * Release appears on track for 3/21 delivery, but don't count on it
+ * [https://github.com/kubernetes/sig-release/issues/86](https://github.com/kubernetes/sig-release/issues/86)
+ * If a release team member asks for something (e.g. docs), please make your best effort to help them.
+* [ 0:07 ] **Graph o' the Week **[Josh Berkus]
+ * Weekly update on data from devstats.k8s.io
+ * New and occasional contributors: [Issues](https://k8s.devstats.cncf.io/d/ey0DOdqzz/new-and-episodic-issues?orgId=1) and [PRs](https://k8s.devstats.cncf.io/d/rCYj6D3kz/new-and-episodic-contributors?orgId=1)
+ * Shows volume of contributions/contributors from community members who are NOT full-time
+ * Important measure of how "welcoming" a community is.
+ * Also important because we've added a lot of process and need to be sure that's not a major blocker for new contributors.
+* [ 0:00 ] **SIG Updates**
+ * SIG Cluster Lifecycle [First Last]
+ * Not happening
+* [ 0:00 ] **Announcements**
+ * Reminder: Contributor Summit happens 1 May, day before Kubecon
+ * [https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit)
+ * Shoutouts this week
+ * Zhonghu Xu - @hzxuzhonghu for many high quality apiserver APIs PRs
+ * Mario & Ilya Dmitrichenko for helping out with [user office hours](https://github.com/kubernetes/community/blob/master/events/office-hours.md) this week!
+ * Volunteers still needed!
+ * Someone doing great work? Give them a shoutout in #shoutouts and we'll mention them during the community meeting.
+ * Meet Our Contributors!!
+ * March 7th at two times
+ * #meet-our-contributors in slack for questions and more details
+ * We'd like to do live peer code reviews, too!
+ * Need SIG volunteers to say what is new in 1.10 in a webinar. Minimal time investment. -- contact [nchase@mirantis.com](mailto:nchase@mirantis.com) or @nickchase
+
+
+## February 15, 2018 - ([recording](https://www.youtube.com/watch?v=eqg5P81zPbs))
+
+
+
+* **Moderators**: Josh Berkus [Contribex]
+* **Note Taker**: Tim Pepper [VMWare/Contribex]
+* [ 0:00 ]** Demo **-- AppZ by Cloudbourne [Rejith Krishnan rkrishnan@cloudbourne.com] (c)
+ * Link to slides: n/a, see live demo in recording
+ * Youtube channel: [https://www.youtube.com/c/Cloudbourne](https://www.youtube.com/c/Cloudbourne)
+ * [https://github.com/rejith/tomcat-loadgen](https://github.com/rejith/tomcat-loadgen)
+ * Platform integrates SCM (GitHub), build (Maven, Gradle, Jenkins), and deploys/monitors app in k8s. Builds on demand in response to commits in SCM.
+ * Dev, test, prod would use separate yaml files (example for a synthetic load generator using tomcat in [appz.yml](https://github.com/rejith/tomcat-loadgen/blob/master/appz.yml)), each describing the build/deploy/monitor needs for the app
+* [ 0:00 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release lead](c)
+ * week 7 of 12 of the Kubernetes 1.10 release cycle
+ * full schedule and some important information is [here](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md)
+ * we're looking on track to meet our release date of March 21st
+ * This week, we're cutting a beta, and setting up the release branch ~ nothing has changed in terms of merges
+ * Next week, we will be entering [code slush](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md#code-slush) We use this time prior to [Code Freeze](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md#code-freeze) to help reduce issue noise from miscellaneous changes that aren't related to issues that SIGs have approved for the milestone. SIGs are the keepers of this label, not the release team, although we can add the label at the request of a SIG if needed
+ * **All issues in the milestone are considered release-blocking**. That allows any SIG to pull the "stop chain" on the release to prevent defects from impacting our community
+ * **code freeze** begins on** February 26th**
+ * 1.9
+ * 1.9.3 is out
+ * 1.9.4 in 2 weeks (planned)
+ * 1.8
+ * 1.8.8 is out
+* [ 0:00 ] **SIG Updates**
+ * SIG Testing [Aaron Crickenberger](c)
+ * Held F2F meeting in Seattle Jan 26, thanks to EKS team for hosting (Bob Wise/AWS offers similar to any other SIG needing periodic f2f space in Seattle)
+ * Jenkins is no more, all jobs kicked off by prow now run natively on Kubernetes
+ * Testing commons subproject ([agenda](https://docs.google.com/document/d/1TOC8vnmlkWw6HRNHoe5xSv5-qv7LelX6XK3UVCHuwb0/edit#heading=h.tnoevy5f439o), [wed bi-weekly 7:30am pt](https://zoom.us/my/k8s.sig.testing)) is a forum for discussing how and what to test
+ * Testgrid in go as of jan 16 loads pages WAAAAAY faster
+ * [https://k8s-testgrid.appspot.com/](https://k8s-testgrid.appspot.com/)
+ * [Python vs go](https://www.dropbox.com/s/q1s98e1d1re3wdb/testgrid-go-vs-python.png)
+ * Prow UI updates
+ * [https://go.k8s.io/bot-commands](https://go.k8s.io/bot-commands)
+ * [https://prow.k8s.io/plugins](https://prow.k8s.io/plugins)
+ * Where are we at with tide these days
+ * [Umbrella issue](https://github.com/kubernetes/test-infra/issues/3866)
+ * [Discussion on how best to represent tide status](https://github.com/kubernetes/test-infra/issues/6145)
+ * [Implement bazel remote caching for faster builds](https://github.com/kubernetes/test-infra/issues/6808)
+ * [Proposal: upload conformance results to testgrid](https://docs.google.com/document/d/1lGvP89_DdeNO84I86BVAU4qY3h2VCRll45tGrpyx90A/edit)
+ * [Proposal: release-blocking and merge-blocking criteria](https://docs.google.com/document/d/1kCDdmlpTnHPQt5z8JzODdFCc3T2D4MKR53twsDZu20c/edit)
+ * Docker in Docker / local e2e
+ * Get to a world where e2e doesn't require full blown cloud
+ * For local dev and for PR's, save on cluster standup/teardown time
+ * [https://github.com/kubernetes/kubernetes/pull/51661](https://github.com/kubernetes/kubernetes/pull/51661)
+ * [Discussed feb 6 meeting](https://docs.google.com/document/d/1z8MQpr_jTwhmjLMUaqQyBk1EYG_Y_3D4y4YdMJ7V1Kk/edit#heading=h.4quubo30kopo)
+ * Misc
+ * [Setting up automation for kubernetes-sigs org](https://github.com/kubernetes/test-infra/pull/6623)
+ * [The label_sync tool](https://github.com/kubernetes/test-infra/tree/master/label_sync)
+ * SIG Contribex [Paris Pittman](c)
+ * Charter
+ * Draft is being socialized in group now
+ * Using tl;dr template from SC (not approved yet but getting ahead of curve)
+ * Contributor Guide
+ * Solving for discoverability, holes in process/documentation, better flow
+ * New area in [k/community ](https://github.com/kubernetes/community/tree/master/contributors/guide)
+ * New on [Kubernetes.io; will be done by 1.10 release ](https://kubernetes.io/docs/imported/community/guide/)
+ * [Mentoring](https://github.com/kubernetes/community/tree/master/mentoring)
+ * Focusing on contributor membership growth; very important to reduce possibilities of burnout, learning and development of current contributors, etc
+ * Testing phase and learning a lot - Group Mentoring, Google Summer of Code, Outreachy, Meet Our Contributors, Proposed "Buddy" Guide Program
+ * Need help!
+ * [issue/1753](https://github.com/kubernetes/community/issues/1753), [issue/1803](https://github.com/kubernetes/community/issues/1803); need more outreachy organization sponsors and sig/wg mentors/projects; need more SIGs/WGs interested in group mentoring
+ * Building skills workshops for group mentoring and future k8s learning and dev. Examples in the [mentee guide](https://github.com/kubernetes/community/blob/master/mentoring/group-mentee-guide.md).
+ * Examples: communication, code review, writing docs/rel notes, testing, etc.
+ * DevStats
+ * Working on User guide -> [https://github.com/cncf/devstats/issues/35](https://github.com/cncf/devstats/issues/35)
+ * Shoutout to docs for adoption and creating SLOs
+ * What questions do you want answered about the project?
+ * Documenting and improving [communication platforms](https://github.com/kubernetes/community/tree/master/communication)
+ * [Slack guidelines](https://github.com/kubernetes/community/blob/master/communication/slack-guidelines.md)
+ * Working on calendar solutions
+ * Issue Triage and Labels
+ * Proposed and created [Triage Guidelines](https://github.com/kubernetes/community/blob/master/contributors/guide/issue-triage.md) to quickly close issues and clearly define the scope of triage or issues management
+ * Proposed new labels to identify issues that are candidates for close so that issues can be closed quickly, manually or with automation, with a reasoning given — allowing clear statistics and measurement of triage efforts. It is WIP with positive feedback from the community.
+ * Misc
+ * Roadshow!
+ * F2F this Tuesday @ INDEX
+ * Contributor Summit in Copenhagen
+ * May 1; registration will be on KubeCon site this week
+ * New weekly meeting (from bi-weekly) same day / time (Weds @ 5pUTC)
+ * SIG API Machinery [Daniel Smith](c)
+ * Reminder: SIG-API doesn't own the API (that's SIG-architecture), but rather mechanics in API server, registry and discovery
+ * Design proposal: [https://goo.gl/UbCRuf](https://goo.gl/UbCRuf)
+ * Seeking feedback on webhook mechanism (slack, sig meeting, email to list) and considering some action on it next quarter
+ * Go Contexts: considering addition to go client (recent similar change going into cloud provider). Should be a very mechanical code change.
+* [ 0:00 ] **Announcements**
+ * Office hours next week!
+ * [https://github.com/kubernetes/community/blob/master/events/office-hours.md](https://github.com/kubernetes/community/blob/master/events/office-hours.md)
+ * Reminder: Contributor Summit will be 1 May, the day before Kubecon EU: [https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit](https://github.com/kubernetes/community/tree/master/events/2018/05-contributor-summit)
+ * /lgtm, /approve and the principle of least surprise
+ * [https://github.com/kubernetes/test-infra/issues/6589](https://github.com/kubernetes/test-infra/issues/6589)
+ * Do we all need to use [the exact same code review process](https://github.com/kubernetes/community/blob/master/contributors/guide/owners.md#the-code-review-process)?
+ * How could we make the existing process clearer and better understood?
+
+
+## February 8, 2018 - ([recording](https://www.youtube.com/watch?v=L1Mk__ddbBg))
+
+
+
+* **Moderators**: Paris Pittman [SIG ContribEx]
+* **Note Taker**: Josh Berkus [Red Hat/SIG-Release]
+* [ 0:00 ]** Demo **-- [stork](https://github.com/libopenstorage/stork), storage orchestration runtime for Kubernetes - Dinesh Israni ([disrani@portworx.com](mailto:disrani@portworx.com))
+ * Administrative support for Hyperconverged Storage (that is, Kube storage running in pods).
+ * Has health monitor for storage nodes with automated failover.
+ * Supports the (currently alpha) snapshot provisioner.
+ * Live demo involving Stork storage with MySQL on top.
+ * Currently only supports Portworx storage, but want contributions from other storage drivers.
+ * [Slide](https://docs.google.com/a/portworx.com/presentation/d/e/2PACX-1vQz3SddQVZFvvymniqeOwgUTO9Yb54YqIVLJzL4eM7TU45zjlPOvdyVTjDl7MyuCwDRpKS8lVtUAYiY/pub?start=false&loop=false&delayms=3000)
+* [ 0:14 ]** Release Updates**
+ * **1.10 **[Jaice Singer DuMars ~ Release lead]
+ * Follow along in the official [schedule](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md), we're in week 6 of 12
+ * Please follow the schedule, we will enforce it
+ * **Feature Freeze has passed**, but we are accepting exceptions on a case-by-case basis using the process found [here](https://github.com/kubernetes/features/blob/master/EXCEPTIONS.md)
+ * If you're curious about what is on deck for 1.10 planned work, pay a visit to the [Features Tracking Spreadsheet](https://docs.google.com/spreadsheets/d/17bZrKTk8dOx5nomLrD1-93uBfajK5JS-v1o-nCLJmzE/edit?usp=sharing)
+ * Next week, we will be cutting our first beta release, as well as assembling the release branch
+ * Release team meetings are in the weekly cadence on Mondays at 10am Pacific until February 26th. If you join either the [SIG-Release](https://groups.google.com/forum/#!forum/kubernetes-sig-release) or [kubernetes-milestone-burndown](https://groups.google.com/forum/#!forum/kubernetes-milestone-burndown) groups, you should get an invite
+ * Meetings may or may not include puppies
+ * please remember that **code freeze** begins on** <blink>February 26th</blink>**
+ * Trying to make sure that nobody is surprised by the schedule!
+ *
+* [ 0:18 ] **Graph o' the Week [Tim Pepper; 1.10 release issue triage shadow]**
+ * Release cadence and feature activity: what is an "issue" or "bug" or "feature" in kubernetes and Github? We all know GitHub has an "issue" object and there are lots of k8s labels in them, but for feature it's complicated:
+ * project specific labels represent release activities and also "kind/feature"
+ * plus a GitHub object "project"
+ * plus a GitHub object "milestone"
+ * _and_ https://github.com/kubernetes/features
+ * Does the "complicated" show up in the stats? Are features efficiently created earlier in the cycle and closed as the cycle progresses?
+ * [SIG Issues - 7 Days Moving Average - SIG Release - Kind All - With Release markers](https://k8s.devstats.cncf.io/d/000000031/sig-issues?orgId=1&var-period=d7&var-sig=release&var-kind=All&from=now-1y&to=now)
+ * Doesn't show clear trends, it's a bit chaotic.
+ * [SIG Issues - 7 Days Moving Average - SIG All - Kind feature - With Release markers](https://k8s.devstats.cncf.io/d/000000031/sig-issues?orgId=1&from=now-1y&to=now&var-period=d7&var-sig=All&var-kind=feature)
+ * You can see phases for issues tagged feature. However, the numbers are increasing towards the end of the release instead of being front-loaded.
+ * For 1.10 we had one SIG which was notably late, trying to figure out if this was a general problem.
+ * You can see roughly 4 phases: open beginning, feature frozen, code frozen, final stabilization.
+ * And a bonus chart of [features ages](https://k8s.devstats.cncf.io/d/000000002/issues-age?orgId=1&var-period=d7&var-repogroup_name=All&var-repogroup=all&var-sig_name=All&var-kind_name=feature&var-prio_name=All&var-sig=all&var-kind=feature&var-prio=all&from=now-1y&to=now)
+ * We'd like to see people incoming to devstats and looking for answers, trying to figure out how to improve the release.
+* [ 0:23 ] **SIG Updates**
+ * SIG Architecture - [ Brian Grant, co-lead ]
+ * Working on identifying subproject owners
+ * Related to SC decision
+ * Need to map existing code subprojects to SIGs
+ * A lot of the work done by Aaron Crickenberger
+ * Have done a few directories, such as for Workloads APIs
+ * Check SIGs.yaml for what's been identified, make sure that it's correct.
+ * Reviewing architectural issues as they arise
+ * Still working on implementing the KEP
+ * A formalization of the design proposal process
+ * SIG Scalability - [Bob Wise]
+ * [Slides](https://docs.google.com/presentation/d/1QunsQVGe4Ky570dI3hwBPH-BdD65wHkMz-g0S_fPYww/edit#slide=id.p )
+ * Moved meeting to 30min later, biweekly to not conflict with SIG-Arch
+ * They believe that the "bigger clusters" problem is not interesting to existing members right now; clusters are big (5000 nodes) and stable. If you want bigger than that, join the SIG.
+ * Mainly about avoiding regressions now.
+ * [They have a new charter](https://github.com/kubernetes/community/pull/1607 ):
+ * Primary work on tooling/monitoring to detect & avoid scaling regressions this year.
+ * They have lots of interest from users in running big clusters, not sure that they're explaining things to those users.
+ * SIG Scheduling - [Bobby Salamat]
+ * A major work item for 1.10 is to move priority and preemption to Beta
+ * New feature in 1.9 alpha.
+ * Very useful for multiple very different workloads
+ * Example: allow production workload to push dev workload aside
+ * Performance improvements
+ * Enable equivalence cache and move it to Beta
+ * Optimize Affinity/Anti-affinity
+ * These are much slower than other predicates (like 10X)
+ * Design a new extension model for scheduler and build a scheduling framework
+ * Currently, the extension model communication is too slow for some plugins.
+ * Three incubators
+ * Kube-arbitrator
+ * "Gang scheduling" so that all pods of a group get scheduled, or not.
+ * Supporting quota for hierarchical namespaces
+ * Cluster capacity tool
+ * Checks if a pod can be scheduled in a certain cluster based on resources
+ * Descheduler
+ * Automated removal of pods to free up resources
+* [ 0:41 ] **Announcements**
+ * Steering Committee Update [Brendan Burns]
+ * [New Repository structure proposal](https://github.com/kubernetes/community/pull/1752)
+ * Sunsetting kubernetes-incubator, won't accept any new projects
+ * 3 classes of repositories: (a) associated repos, (b) sig-owned repositories, (c) kubernetes repositories (approved by sig-arch)
+ * This is about new repos going forward, <span style="text-decoration:underline;">not</span> a mandate for existing repos anytime soon.
+ * Except for a few things like Owners files
+ * SIGs who want to move from incubator to sigs repos, stay tuned for details
+ * Question [Matt Farina]: associated repos: who would want one of these? Why do this?
+ * Brendan: as a prerequisite for submitting a feature to Kubernetes. CLA is gateway here. Also, even for external things a consistent process is good and makes it easier for contributors.
+ * Example: Kube-sanity project. Doesn't belong to a SIG, but all Kube contributors.
+ * Intel Intro [Jose Palafox]
+ * Jose is the program manager for Intel's Kubernetes efforts.
+ * They have a team of 16 engineers on Kube.
+ * Reach out to Jose if you want to collaborate with them.
+ * #shoutouts [Jorge Castro]
+ * Duffie Cooley, Stefan Schimanski, Craig Tracey, Timothy St. Clair, Chuck Ha, Liz Frost, Nikhita Raghunath, Aaron Crickenberger, Ilya Dmitrichenko, Ihor Dvoretski, Ellen Korbes and Tim Pepper
+ * SIG Schedule for this call for the next few months: [SIG Update Schedule](https://docs.google.com/spreadsheets/d/1adztrJ05mQ_cjatYSnvyiy85KjuI6-GuXsRsP-T2R3k/edit#gid=0) (always posted at the top of this document)
+ * Schedule is fixed, please check it.
+ * Meet Our Contributors - first weds of the month
+ * First live-streamed one!
+ * [GH Page](https://github.com/kubernetes/community/blob/master/mentoring/meet-our-contributors.md) ; [Call for volunteers](https://github.com/kubernetes/community/issues/1753)
+ * #meet-our-contributors on slack for questions and code review snips
+
+
+## February 1, 2018 - [recording](https://www.youtube.com/watch?v=Oj-0l7vdUac)
+
+
+
+* **Moderators**: Solly Ross [SIG Autoscaling]
+* **Note Taker**: First Last [Company/SIG]
+* [ 0:00 ]** Demo **-- [generator-kubegen](https://github.com/sesispla/generator-kubegen), a Kubernetes config generation tool - Sergio Sisternes ([ssistern@everis.com](mailto:ssistern@everis.com))
+ * Link to slides
+ * [Link to repositories](https://github.com/sesispla/generator-kubegen)
+ * Yeoman-based wizard for generating Kubernetes YAML
+ * Asks basic questions, generated Kubernetes object definitions
+ * Can create everything for a basic app, or just individual objects
+ * Questions:
+        * (not a question) Anybody interested in common patterns should join the App Def WG
+* [ 0:00 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release lead/Ihor Dvoretskyi ~ Features lead]
+ * Follow along in the official [schedule](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md), we're in week 5 of 12
+ * Features freeze has passed, and looking good!
+ * [Alphas have been cut successfully](https://github.com/kubernetes/kubernetes/releases/tag/v1.10.0-alpha.2)
+ * Marketing activities (e.g. blog preparation) are beginning
+ * Questions:
+ * (not a question) Lots of issues without labels, please go in and make sure 1.10 issues have labels
+ * How do we go in and try out the latest alphas, e.g. cluster provisioning tool?
+ * Kubeadm supports alphas, as long as they've been pushed to GCS buckets
+* [ 0:00 ] **Graph o' the Week **[Aaron Crickenberger]
+ * Weekly update on data from devstats.k8s.io
+ * Actually a table! What a twist!
+ * [https://k8s.devstats.cncf.io/dashboard/db/developers-summary?orgId=1](https://k8s.devstats.cncf.io/dashboard/db/developers-summary?orgId=1)
+ * Collects GitHub events (e.g. comments, commits, etc), and associates them with GitHub users
+ * Can break down by releases, time
+ * Not yet broken down by repositories
+ * Questions:
+ * Does it include incubator
+ * Yes, includes all kubernetes-associated repos (kubernetes, kubernetes-incubator, helm, kube-clients)
+* [ 0:00 ] **SIG Updates**
+ * SIG Cluster Ops [Rob Hirschfeld] - [http://bit.ly/k8sclops](http://bit.ly/k8sclops)
+ * New time! 1 hour earlier (12 am Pacific)
+ * Next meeting one hour from now (every two weeks)
+ * Change in format to be more "meetup" style
+ * Looking to bring in more demos & discussions
+ * Specifically want to hear from operators
+ * Fine w/ longer format vendor demos to get feedback
+ * Want to hear more about different deployment patterns
+ * SIG Autoscaling [Solly Ross]
+ * Work continues on VPA (Vertical Pod Autoscaler)
+ * Follow it at [https://github.com/kubernetes/autoscaler](https://github.com/kubernetes/autoscaler)
+ * Investigating minor additions to HPA v2 to improve flexibility with regards to "standalone"/"unassociated" metrics before graduation
+ * Continuing to get feedback on metrics API adapters and identify and improve issues
+ * Meeting times at [https://github.com/kubernetes/community/blob/master/sig-autoscaling/README.md](https://github.com/kubernetes/community/blob/master/sig-autoscaling/README.md)
+* [ 0:00 ] **Announcements**
+ * Steering Committee update
+ * Formalizing subprojects
+ * Part of [proposal from sig architecture](https://docs.google.com/document/d/1FHauGII5LNVM-dZcNfzYZ-6WRs9RoPctQ4bw5dczrkk/edit#heading=h.2nslsje41be1)
+ * Want to make sure everything is owned by some group (SIG or subset thereof), sometimes things are owned by a group within a SIG
+ * [Issue](https://github.com/kubernetes/community/issues/1673)
+ * [Initial implementation PR](https://github.com/kubernetes/community/pull/1674 )
+ * SIG leads should look at PR and sanity check it
+ * This is a non-binding first pass, goal was to make sure every repo had an owning sig, would like help iterating on what subprojects exist and who should own them
+ * Seemed easier to do in one place vs. distributed across all repos
+ * Next steps involve building automation to consume/enforce, making individual repos source of truth via additions to OWNERS files
+ * Examples:
+ * sig-apps owns the "charts" subproject, which corresponds to the "charts" repo
+ * Sig-apps also owns "Workloads API" subproject, corresponding to the API types, clients, etc for the workloads types
+ * Could have a project containing all kubernetes client repos, for instance
+ * Questions
+ * If there's a new subproject that needs a repo, who decides what gets a repo in kubernetes
+ * Answer: see below :-)
+ * Upcoming
+ * Repositories proposal (aka "the incubator problem")
+ * Moving towards 3 classes of projects (how formal things are)
+ * Core kubernetes repos (everything in kubernetes/kubernetes + staging, more-or-less), fairly formal, has process, lots of testing, etc
+ * SIG repos (encourage SIGs to create repos as they see fit, maybe create subproject repos, either stuff outside of core, or stuff that is a prototype before going into core)
+ * Associated repos (needs CLA bot turned on, code-of-conduct but that's about it)
+ * Doc forthcoming
+ * Expectations of SIG charters and template charters
+ * A checklist/template will come out eventually
+ * Feel free to discuss and submit before then
+ * CNCF graduation
+ * Looking to graduate Kubernetes through the CNCF, making it the first project to do so
+ * Amazon participation update [Bob Wise]
+ * Increasing/ramping up direct involvement
+ * Expect to see more contributions around testing in the short run, AWS experience
+ * Participate in SIG AWS (both EKS and non-EKS)
+ * Can be found on Slack as well, feel free to reach out with feedback, ideas, etc
+ * SIG Testing Commons subproject announced [Tim St. Clair]
+ * Focus on
+ * tests are written
+ * contributing tests
+ * cleaning up tests
+ * what things are tested
+ * e2e framework
+ * Conformance
+ * Please come participate
+ * Kubernetes Documentation [User Journeys MVP](https://kubernetes.io/docs/home/) launched [Andrew Chen]
+        * Please give SIG Docs feedback, still adding things later
+ * Can contribute normally (join SIG docs for more information)
+ * New landing page incorporating personas (users, contributors, operators)
+ * Levels of knowledge (foundational, advanced, etc)
+ * Can also just browse docs directly
+ * SIG Arch Announcement [Joe Beda]
+ * Control Plane naming
+ * Feel free to comment offline or on the issue if you have comments
+ * TL;DR: call it the "control plane"
+ * Issue: [https://github.com/kubernetes/website/issues/6525](https://github.com/kubernetes/website/issues/6525)
+ * Contributor Summit for Kubecon EU [Jorge and Paris]
+ * SAVE THE DATE: May 1, 2018
+ * [https://github.com/kubernetes/community/pull/1718](https://github.com/kubernetes/community/pull/1718)
+ * #shoutouts - [Jorge Castro]
+ * New channel in Slack
+ * Someone do something great for the community? Give them a shoutout here and we'll take the time to thank them for their work at the end of each community meeting.
+ *
+
+
+## January 25, 2018 - ([recording](https://www.youtube.com/watch?v=hAg6aGAG3bs))
+
+
+
+* **Moderators**: Mario Loria [Meetup Organizer / Liquidweb] (confirmed)
+* **Note Taker(s)**: Jorge Castro [SIG Contributor Experience]
+* [ 0:00 ]** Demo **-- [kube-toolkit](https://github.com/radu-matei/kube-toolkit) - toolkit for creating gRPC-based CLI and web tools for Kubernetes - Radu Matei ( [radu@radu-matei.com](mailto:radu@radu-matei.com) ) (confirmed)
+ * kube-exec [https://github.com/radu-matei/kube-exec](https://github.com/radu-matei/kube-exec) - os/exec for remote K8S pods
+* [ 0:00] **INDEX Conference** [Jeff Borek]
+ * [https://developer.ibm.com/indexconf/](https://developer.ibm.com/indexconf/)
+ * Offering space at the Moscone on 20 Feb for communities to hold face to face meetings. Attendance on the 20th is COMPLETELY FREE, but you must register, you'll get 50% off the rest of the conference if you want to stay!
+ * Please contact **jborek@us.ibm.com** if you're interested in claiming some space for your SIG.
+ * "Meet the SIGs" community day will be on the 20th, with Sarah Novotny delivering the keynote.
+ * SIGs interested in participating:
+ * SIG Contributor Experience
+ * SIG Docs
+ * … add yours
+* [ 0:00 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release leader]
+ * Follow along in the official [schedule](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md), we're in week 4 of 12
+ * Feature freeze pushed to 1/29 [Ihor Dvoretskyi, Features Lead]
+ * [Feature summary](https://docs.google.com/spreadsheets/d/17bZrKTk8dOx5nomLrD1-93uBfajK5JS-v1o-nCLJmzE/edit#gid=0)
+ * Shooting for Alpha next week on Tuesday
+ * 1.9.2 is out (thank you Mehdy!)
+ * 1.8.8 - no ETA
+* [ 0:00 ] **Graph o' the Week **[Aaron Crickenberger]
+ * Weekly update on data from devstats.k8s.io
+ * [https://k8s.devstats.cncf.io/dashboard/db/sig-mentions-categories?orgId=1](https://k8s.devstats.cncf.io/dashboard/db/sig-mentions-categories?orgId=1)
+    * Pick a sig, for example SIG-CLI, look into [https://github.com/kubernetes/community/tree/master/sig-cli](https://github.com/kubernetes/community/tree/master/sig-cli) and there are 8 different github teams for SIG-CLI which can be used in issue and PR mentions. In the devstats then you can view: [https://k8s.devstats.cncf.io/dashboard/db/sig-mentions-categories?orgId=1](https://k8s.devstats.cncf.io/dashboard/db/sig-mentions-categories?orgId=1) selecting the SIG at the top and you see the usage of the github teams in mentions for that SIG. Are all the github teams across all the sigs actually in use? There's a TONNE of them (30 sigs x 8 teams). Which does a person use to ping the right set of people? This devstats graph tunnel down is the current best way to find out. Alternatively need to discuss if these could be simplified into a smaller number of subteams, or do SIGs find the separation into subteams useful?
+* [ 0:00 ] **SIG Updates**
+ * SIG Service Catalog [Paul Morie] (confirmed)
+ *
+ * SIG CLI [Sean Sullivan] (confirmed)
+ * Moving apply and merge to the server side
+ * Breaking up the monolithic kubectl.
+ *
+* [ 0:00 ] **Announcements**
+ * SIG leads: register to offer intros and deep dives in SIG track at KubeCon Copenhagen (May 2-4): [overview](https://groups.google.com/forum/#!searchin/kubernetes-dev/kohn%7Csort:date/kubernetes-dev/5U-eNRBav2Q/g71MW47ZAgAJ), [signup](https://docs.google.com/forms/d/e/1FAIpQLSedSif6MwGfdI1-Rb33NRjTYwotQtIhNL7-ebtYQoDARPB2Tw/viewform) (1/31 deadline)
+ * [SIG Contributor Experience news: new lead, new meeting](https://groups.google.com/forum/#!topic/kubernetes-dev/65S1Y3IK8PQ)
+ * [Meet Our Contributors ](https://github.com/kubernetes/community/blob/master/mentoring/meet-our-contributors.md)- Feb 7th [Paris]
+ * 730a PST/ 3:30 pm UTC & 1pm PST / 9pm UTC
+ * Need contributor volunteers for 1pmPST/9pmUTC session -> [Sign Up [WIP]](https://docs.google.com/spreadsheets/d/1OKc4h-0QLKCbncSloRf_gYklpHYVyNxZmfEM9hlK1xQ/edit?usp=sharing) on m-o-c tab
+ * 30 mins AMA; 30 mins live peer code review
+ * Part of larger mentoring initiatives -> [GH repo](https://github.com/kubernetes/community/tree/master/mentoring) ;[ Issue](https://github.com/kubernetes/community/issues/1672)
+ * New SIG-Release lead, Jaice Singer DuMars replacing Phil Wittrock
+ * A new SIG Scheduling lead has been scheduled via Bobby Salamat
+
+
+## January 18, 2018 - ([recording](https://www.youtube.com/watch?v=x67RK7W-BnM))
+
+
+
+* **Moderators**: Tim Pepper [VMware/SIG Contrib-Ex]
+* **Note Taker**: Jaice Singer DuMars [Microsoft/SIG-breakfast]
+* **Chat Transcript**
+* [ 0:00 ]** Demo **-- Kubernetes on Docker for Mac - Jenny Burcio (jenny@docker.com), Arun Gupta ([arun.gupta@gmail.com](mailto:arun.gupta@gmail.com)) (confirmed)
+ * Link to slides
+ * Link to repositories
+ * Not slated for Linux atm, but Windows might be sooner than later
+ * Uses a CRD where Docker command talks to CRD, creates a new service objects to translate compose files
+* [ 0:00 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release lead]
+ * Follow along with the [schedule](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md)
+ * Alpha delayed - requires individual access on Google side so waiting for Caleb's onboarding, also bicycling can be dangerous
+ * [Features](https://github.com/kubernetes/features/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+milestone%3Av1.10) update
+ * **Feature freeze is coming, 1/22 [Ihor]**
+ * **Please, ensure that your feature is targeting [1.10 milestone](https://github.com/kubernetes/features/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+milestone%3Av1.10) on GitHub**
+ * **Add your feature to the [spreadsheet](https://docs.google.com/spreadsheets/d/17bZrKTk8dOx5nomLrD1-93uBfajK5JS-v1o-nCLJmzE/edit#gid=0)**
+ * 1.9.2 Should be today (thanks Mehdy!)
+ * 1.8.7 is out (thanks David!)
+* [ 0:00 ] **Graph o' the Week **[Jorge Castro]
+ * Weekly update on data from devstats.k8s.io
+ * [Approvers](https://k8s.devstats.cncf.io/dashboard/db/approvers?orgId=1&var-period=q&var-repogroups=All)
+ * [Approvers Histogram](https://k8s.devstats.cncf.io/dashboard/db/approvers-histogram?orgId=1&var-period_name=Last%20month&var-period=m&var-repogroup_name=All&var-repogroup=all)
+* [ 0:20 ] **SIG Updates**
+ * SIG Docs [Devin Donnelly, Andrew Chen]
+ * K8s Docs Structure
+ * Link to slides (see video for narrative)
+ *
+ * SIG Network [Dan Williams]
+ * 1.10 features and work
+ * IPv6 single and dual-stack
+ * IPVS proxy to Beta/GA
+ * Move more kubenet to CNI
+ * Windows proxy and networking
+ * Multiple pod IP addresses (necessary for IPv6 dual-stack)
+ * Continue exploring new Service API
+ * Topology aware ingress and proxies
+ * Continued work on more flexible pod networking through informal Network Plumbing Working Group
+ * SIG Service Catalog [Paul Morie]
+ * Paul is out sick today and sends his regrets, he'll do an update next week
+ * Feel better Paul! +1
+* [0:37] **(Steering Committee)** **Sig Governance Update** [Phillip Wittrock]
+ * Goals: help community to self organize by providing a template charter for SIGs
+ * Complete: reached out to SIG leads with a long form questionnaire to get detailed insight into how various SIGs are structured and function
+ * In progress: developing a template which defines SIG structure and governance within a SIG charter
+ * **Important:** to contribute your insight and experiences to the process, answer these questions [https://goo.gl/Zm81Ly](https://goo.gl/Zm81Ly)
+* [ 0:39 ] **Announcements**
+ * GSoC [Ihor D]
+ * [https://github.com/cncf/soc](https://github.com/cncf/soc); [k8s gh](https://github.com/kubernetes/community/blob/master/mentoring/google-summer-of-code.md)
+ * nikhita has volunteered to drive this program for Kubernetes
+ * SIG Intros & Deep Dives sessions registration at KubeCon & CloudNativeCon will be announced shortly (stay tuned!)
+ * Changes to this meeting's format [Jorge Castro]
+ * SIGs scheduled per cycle instead of adhoc
+ * Demo changes
+ * Note takers (+1)
+ * Meet our Contributors - Ask Us Anything [Paris]
+ * Feb 7th - 8:30am
+ * [adding link]
+
+
+## January 11, 2018 - recording
+
+
+
+* **Moderators**: Swarna Podila [SIG Awesome]
+* **Note Taker**: First Last [Company/SIG]
+* **Chat Transcript**
+* [ 0:00 ]** Demo **--01/11 [KQueen](https://github.com/mirantis/KQueen) Kubernetes cluster manager demo: Jakub Pavlik ( [jpavlik@mirantis.com](mailto:jpavlik@mirantis.com) )
+*
+ * Tech issues, we'll reschedule this demo at a later date.
+* [ 0:00 ]** Release Updates**
+ * 1.10 [Jaice Singer DuMars ~ Release lead]
+ * It's week 2 of 12 of the release (full schedule and some important information is [here](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md))
+ * The [release team](http://bit.ly/k8s110-team) is fully staffed!
+ * we have added a role guide for the [Communications Coordinator](https://github.com/kubernetes/sig-release/blob/master/release-process-documentation/release-team-guides/communications_coordinator.md)
+ * [PR](https://github.com/kubernetes/sig-release/pull/69) out for the Release Leader role documentation
+ * The next major deadline is Feature Freeze on January 22nd [Ihor]
+ * If you're targeting a feature for 1.10 - please, ensure that it's updated in the [Features repo under 1.10 Milestone](https://github.com/kubernetes/features/issues?q=is%3Aopen+is%3Aissue+milestone%3Av1.10);
+ * Ihor will start grooming the 1.10 backlog in the features repo on Monday; now it's a call for feature owners to update the features in the repo
+ * [Features tracking spreadsheet is ready for your contributions](https://docs.google.com/spreadsheets/d/17bZrKTk8dOx5nomLrD1-93uBfajK5JS-v1o-nCLJmzE/edit#gid=0) (shared r/w with kubernetes-dev);
+ * 1.9.2 - due Thursday
+ * 1.8.7 - Early next week, with PR deadline on Friday
+* [ 0:00 ] **Graph o' the Week **[Aaron Crickenberger / @spiffxp]
+ * Weekly update on data from devstats.k8s.io
+ * Today's graph: [https://k8s.devstats.cncf.io/dashboard/db/need-rebase-prs](https://k8s.devstats.cncf.io/dashboard/db/need-rebase-prs)
+ * [What are the repo groups?](https://github.com/cncf/devstats/blob/master/scripts/kubernetes/repo_groups.sql)
+ * IMO this graph is more useful than [All Need Rebase PR's](https://k8s.devstats.cncf.io/dashboard/db/all-need-rebase-prs?orgId=1)
+ * [I'm asking that we remove the "All… " dashboards](https://github.com/cncf/devstats/issues/41) and merge their graphs into the more detailed dashboards
+ * [Github search: all open kubernetes PR's](https://github.com/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+user%3Akubernetes)
+ * [Github search: all open kubernetes PR's with label:needs-rebase](https://github.com/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+user%3Akubernetes+label%3Aneeds-rebase+)
+ * [The munger responsible for applying needs-rebase](https://github.com/kubernetes/test-infra/blob/master/mungegithub/mungers/needs_rebase.go)
+ * [We are migrating mungers out of github](https://github.com/kubernetes/test-infra/issues/3331)
+ * [We've added a prow plugin for needs-rebase](https://github.com/kubernetes/test-infra/pull/6121)
+ * [We plan on migrating mungegithub to github.com/kubernetes-retired](https://github.com/kubernetes/test-infra/issues/6104)
+* [ 0:00 ] **SIG Updates ([List of SIGs](https://github.com/kubernetes/community/blob/master/sig-list.md))**
+ * SIG Azure [Jaice Singer DuMars] (confirmed)
+ * Moving Microsoft upstream planning 100% to the SIG
+ * Cloud provider breakout work continues
+ * 1.10 planning was yesterday, we're going to try out the new features process
+ * SIG Node[Dawn Chen] (confirmed)
+ * 1.10 planning
+ * [https://docs.google.com/document/d/15F3nWPPG3keP0pzxgucPjA7UBj3C31VsFElO7KkDU04/edit?userstoinvite=doug.maceachern@gmail.com&ts=5a579cac](https://docs.google.com/document/d/15F3nWPPG3keP0pzxgucPjA7UBj3C31VsFElO7KkDU04/edit?userstoinvite=doug.maceachern@gmail.com&ts=5a579cac)
+* What's the latest on combining provider work into a single SIG vs breaking it out?
+ * Please see [https://github.com/kubernetes/community/tree/master/wg-cloud-provider](https://github.com/kubernetes/community/tree/master/wg-cloud-provider)
+ * ([Notes](https://docs.google.com/document/d/1OZE-ub-v6B8y-GuaWejL-vU_f9jsjBbrim4LtTfxssw/edit#heading=h.w7i4ksrweimp))
+* [ 0:00 ] **Announcements**
+ * The final call for [KubeCon/CloudNativeCon EU 2018](https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/) CFP submissions! [Ihor]
+ * CFP form will close on Jan 12 at 11:59 PT
+ * Early pricing deadline is the same ^
+ * IBM is organizing [INDEX](https://developer.ibm.com/indexconf/) conference in San Francisco late February. There's a free space for f2f meetings at Moscone Center on February 20. [Ihor]
+ * If your SIG (or a different group of contributors) would like to organize a f2f meeting on February 20 in SF, please reach Ihor Dvoretskyi about the details
+ * Setting up merge automation for all github.com/kubernetes repos [Aaron Crickenberger / @spiffxp]
+ * [https://github.com/kubernetes/test-infra/issues/6227](https://github.com/kubernetes/test-infra/issues/6227)
+ * [https://groups.google.com/d/msg/kubernetes-dev/h-0hGFJ8x1E/g4UuGr5zDAAJ](https://groups.google.com/d/msg/kubernetes-dev/h-0hGFJ8x1E/g4UuGr5zDAAJ)
+ * k8s Office hours this Wednesday! Ping Jorge Castro (@jorge on slack) if you want to volunteer [https://git.k8s.io/community/events/office-hours.md](https://git.k8s.io/community/events/office-hours.md)
+
+
+## January 04, 2018 - ([recording](https://www.youtube.com/watch?v=fdXS-mSX7F8))
+
+
+
+* **Moderators**: Chris Short []
+* **Note Taker**: Jaice Singer DuMars [SIG-kiwi]
+* **Chat Transcript**
+* [ 0:02 ]** Demo [ 10 minutes ] **-- [kube-arbitrator](https://github.com/kubernetes-incubator/kube-arbitrator) demo: Klaus Ma (@k82cn, [madaxa@cn.ibm.com](mailto:madaxa@cn.ibm.com) / @jinzhejz, [jinzhej@cn.ibm.com](mailto:jinzhej@cn.ibm.com) )
+ * [design doc](https://docs.google.com/document/d/1-H2hnZap7gQivcSU-9j4ZrJ8wE_WwcfOkTeAGjzUyLA/edit#heading=h.uedqgav5zc53)
+ * [https://github.com/kubernetes-incubator/kube-arbitrator](https://github.com/kubernetes-incubator/kube-arbitrator)
+* [ 0:10 ]** Release Updates [ 5 minutes ]**
+ * 1.10 [Jaice Singer DuMars ~ Release lead]
+ * [Schedule](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release-1.10.md) is set ([http://bit.ly/k8s110-schedule](http://bit.ly/k8s110-schedule))
+ * [Team](https://github.com/kubernetes/sig-release/blob/master/releases/release-1.10/release_team.md) is forming ([http://bit.ly/k8s110-team](http://bit.ly/k8s110-team))
+ * **Need** CI Signal, Bug Triage shadow, and Branch Manager (Google only)
+ * Key dates:
+ * **release day**, Wednesday, March 21st
+ * **Feature freeze** is Monday, January 22nd
+ * **Code freeze** begins Monday February 26th and ends Wednesday, March 14th
+ * **Docs** must be completed and reviewed by Friday, March 9th
+* [ 0:15 ] **Graph o' the Week **[Aaron Crickenberger / @spiffxp] [** 5 minutes** ]
+ * Weekly update on data from devstats.k8s.io
+ * [https://k8s.devstats.cncf.io/dashboard/db/bot-commands](https://k8s.devstats.cncf.io/dashboard/db/bot-commands)
+ * Manually updated list of bot commands: [https://go.k8s.io/bot-commands](https://go.k8s.io/bot-commands)
+ * Auto-generated help page with examples: [https://prow.k8s.io/plugin-help.html](https://prow.k8s.io/plugin-help.html)
+ * Mapping of repository groups: [https://github.com/cncf/devstats/blob/master/scripts/kubernetes/repo_groups.sql](https://github.com/cncf/devstats/blob/master/scripts/kubernetes/repo_groups.sql)
+ * Most used command: /cc (to be used for reviews)
+ * Recent command that's seen growth: /hold
+ * Nit: /lgtm can count as /approve, devstats can't tell the difference
+ * Some recent spikes of interest: /priority, /lifecycle, /close
+* [ 0:20 ] **SIG Updates [ 5 minutes ] **
+ * SIG Instrumentation [Frederic Branczyk, CoreOS ]
+ * Presentation link
+ * multiple kube-state-metrics releases - many new metrics, stability, and features ~ making metrics actionable
+ * 1.0 has been released
+ * Core and Custom metrics APIs have been promoted to beta
+ * Formally defined APIs for metrics, across all workload types
+ * Can be anything arbitrary your system can capture
+ * Beta as of 1.8, just remember that they are aggregated API servers, so the implementations may be in flux or different
+ * Custom Prometheus Adapter (need GH link from DirectXMan12/k8s-prometheus-adapter
+ * Can autoscale on arbitrary metrics collected in Prometheus
+ * Removing heapster dependencies
+ * Heapster maintainers have come into the SIG
+ * Meet every thursday 6PM European time
+* [ 0:26 ] **Announcements [ 5 minutes ] **
+ * [tstclair] - Socialize proposal to move from 4 -> 3 release cycles a year to reduce
+ * [https://groups.google.com/forum/#!topic/kubernetes-dev/nvEMOYKF8Kk](https://groups.google.com/forum/#!topic/kubernetes-dev/nvEMOYKF8Kk)
+ * Check with your cloud providers wrt. Meltdown/Spectre: [https://meltdownattack.com/](https://meltdownattack.com/)
+ * (Too much info to cover here)
+ * Office Hours is back, 17 Jan! [https://git.k8s.io/community/events/office-hours.md](https://git.k8s.io/community/events/office-hours.md)
+ * New slack guidelines -> [https://github.com/kubernetes/community/blob/master/communication/slack-guidelines.md](https://github.com/kubernetes/community/blob/master/communication/slack-guidelines.md)
+ * Group mentoring cohort #1 kicked off today! \o/
+ * Current members to reviewers
+ * [https://goo.gl/forms/nAWxAWpVBdNQbyWy1](https://goo.gl/forms/nAWxAWpVBdNQbyWy1)
+ * Contributor Office Hours (will be renamed) coming at the end of the month - date TBA; doc to be created in the mentoring docs folder -> [https://github.com/kubernetes/community/tree/master/mentoring](https://github.com/kubernetes/community/tree/master/mentoring)
+ * KubeCon/CloudNativeCon EU 2018 CFP closes on Jan 12 - [https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/](https://events.linuxfoundation.org/events/kubecon-cloudnativecon-europe-2018/)
+* [ 0:00 ] **1.9 Release Retrospective Part 2 [ 30 minutes ] **
+ * NOTE: Please add your retro items to the document below the [ part 2 ] line
+ * The [retro doc](http://bit.ly/kube19retro)
+ * Part One [recording](https://youtu.be/oagLX--fdDs)
diff --git a/communication/moderation.md b/communication/moderation.md
index 31f43376..6118b12b 100644
--- a/communication/moderation.md
+++ b/communication/moderation.md
@@ -3,6 +3,8 @@
This page describes the rules and best practices for people chosen to moderate Kubernetes communications channels.
This includes, Slack and the mailing lists and _any communication tool_ used in an official manner by the project.
+- Check the [centralized list of administrators](./moderators.md) for contact information.
+
## Roles and Responsibilities
As part of volunteering to become a moderator you are now representative of the Kubernetes community and it is your responsibility to remain aware of your contributions in this space.
diff --git a/communication/moderators.md b/communication/moderators.md
new file mode 100644
index 00000000..cec398bf
--- /dev/null
+++ b/communication/moderators.md
@@ -0,0 +1,62 @@
+# Community Moderators
+
+The following people are responsible for moderating/administrating Kubernetes communication channels, listed along with their home time zones.
+See our [moderation guidelines](./moderation.md) for policies and recommendations.
+
+## Mailing Lists
+
+### kubernetes-dev
+
+### Administrators
+
+- Sarah Novotny (@sarahnovotny) - PT
+- Brian Grant (@bgrant0607) - PT
+
+### Moderators
+
+- Paris Pittman (@parispittman) - PT
+- Jorge Castro (@castrojo) - ET
+- Jaice Singer DuMars - (@jdumars) - PT
+- Louis Taylor (@kragniz)- CET
+- Nikhita Raghunath (@nikhita) - IT
+
+## GitHub
+
+- [GitHub Administration Team](https://github.com/kubernetes/community/tree/master/github-management#github-administration-team)
+
+## discuss.kubernetes.io
+
+### Administrators
+
+- Paris Pittman (@parispittman) - PT
+- Jorge Castro (@castrojo) - ET
+- Bob Killen (@mrbobbytables) - ET
+- Jeffrey Sica (@jeefy) - ET
+
+### Additional Moderators
+
+- Ihor Dvoretskyi (@idvoretskyi) - CET
+
+## YouTube Channel
+
+- Paris Pittman (@parispittman) - PT
+- Sarah Novotny (@sarahnovotny) - PT
+- Bob Hrdinsky - PT
+- Ihor Dvoretskyi (@idvoretskyi) - CET
+- Jeffrey Sica (@jeefy) - ET
+- Jorge Castro (@castrojo) - ET
+- Joe Beda - (@joebeda) - PT
+- Jaice Singer DuMars - (@jdumars) - PT
+
+## Slack
+
+- Chris Aniszczyk (@caniszczyk) - CT
+- Ihor Dvoretskyi (@idvoretskyi) - CET
+- Jaice Singer DuMars (@jdumars) - PT
+- Jorge Castro (@castrojo) - ET
+- Paris Pittman (@parispittman) - PT
+
+## Zoom
+
+- Paris Pittman (@parispittman) - PT
+- Jorge Castro (@castrojo) - ET
diff --git a/communication/slack-guidelines.md b/communication/slack-guidelines.md
index 23634339..32d20e27 100644
--- a/communication/slack-guidelines.md
+++ b/communication/slack-guidelines.md
@@ -10,12 +10,8 @@ Chat is searchable and public. Do not make comments that you would not say on a
Kubernetes adheres to Cloud Native Compute Foundation's [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) throughout the project, and includes all communication mediums.
## ADMINS
-(by Slack ID and timezone)
-* caniszczyk - CT
-* idvoretskyi - CET
-* jdumars - ET
-* jorge - CT
-* paris - PT
+
+- Check the [centralized list of administrators](./moderators.md) for contact information.
Slack Admins should make sure to mention this in the “What I do” section of their Slack profile, as well as for which time zone.
diff --git a/communication/zoom-guidelines.md b/communication/zoom-guidelines.md
index 18aec1e1..bfa6b43e 100644
--- a/communication/zoom-guidelines.md
+++ b/communication/zoom-guidelines.md
@@ -32,8 +32,7 @@ Contact [SIG Contributor Experience](https://github.com/kubernetes/community/tre
## Admins
-- @parispittman
-- @castrojo
+- Check the [centralized list of administrators](./moderators.md) for contact information.
Each SIG should have at least one person with a paid Zoom account.
See the [SIG Creation procedure](https://github.com/kubernetes/community/blob/master/sig-governance.md#sig-creation-procedure) document on how to set up an initial account.
diff --git a/contributors/design-proposals/api-machinery/customresource-conversion-webhook.md b/contributors/design-proposals/api-machinery/customresource-conversion-webhook.md
new file mode 100644
index 00000000..37054763
--- /dev/null
+++ b/contributors/design-proposals/api-machinery/customresource-conversion-webhook.md
@@ -0,0 +1,859 @@
+# CRD Conversion Webhook
+
+Status: Approved
+
+Version: Alpha
+
+Implementation Owner: @mbohlool
+
+Authors: @mbohlool, @erictune
+
+Thanks: @dbsmith, @deads2k, @sttts, @liggit, @enisoc
+
+### Summary
+
+This document proposes a detailed plan for adding support for version-conversion of Kubernetes resources defined via Custom Resource Definitions (CRD). The API Server is extended to call out to a webhook at appropriate parts of the handler stack for CRDs.
+
+No new resources are added; the [CRD resource](https://github.com/kubernetes/kubernetes/blob/34383aa0a49ab916d74ea897cebc79ce0acfc9dd/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/types.go#L187) is extended to include conversion information as well as multiple schema definitions, one for each apiVersion that is to be served.
+
+
+## Definitions
+
+**Webhook Resource**: a Kubernetes resource (or portion of a resource) that informs the API Server that it should call out to a Webhook Host for certain operations.
+
+**Webhook Host**: a process / binary which accepts HTTP connections, intended to be called by the Kubernetes API Server as part of a Webhook.
+
+**Webhook**: In Kubernetes, refers to the idea of having the API server make an HTTP request to another service at a point in its request processing stack. Examples are [Authentication webhooks](https://kubernetes.io/docs/reference/access-authn-authz/webhook/) and [Admission Webhooks](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/). Usually refers to the system of Webhook Host and Webhook Resource together, but occasionally used to mean just Host or just Resource.
+
+**Conversion Webhook**: Webhook that can convert an object from one version to another.
+
+**Custom Resource**: In the context of this document, it refers to resources defined as Custom Resource Definition (in contrast with extension API server’s resources).
+
+**CRD Package**: CRD definition, plus associated controller deployment, RBAC roles, etc., which is released by a developer who uses CRDs to create new APIs.
+
+
+## Motivation
+
+Version conversion is, in our experience, the most requested improvement to CRDs. Prospective CRD users want to be certain they can evolve their API before they start down the path of developing a CRD + controller.
+
+
+## Requirements
+
+* As an existing author of a CRD, I can update my API's schema, without breaking existing clients. To that end, I can write a CRD(s) that supports one kind with two (or more) versions. Users of this API can access an object via either version (v1 or v2), and are accessing the same underlying storage (assuming that I have properly defined how to convert between v1 and v2.)
+
+* As a prospective user of CRDs, I don't know what schema changes I may need in the future, but I want to know that they will be possible before I chose CRDs (over EAS, or over a non-Kubernetes API).
+
+* As an author of a CRD Package, my users can upgrade to a new version of my package, and can downgrade to a prior version of my package (assuming that they follow proper upgrade and downgrade procedures; these should not require direct etcd access.)
+
+* As a user, I should be able to request a CR in any supported version defined by the CRD and get an object that has been properly converted to the requested version (assuming the CRD Package Author has properly defined how to convert).
+
+* As an author of a CRD that does not use validation, I can still have different versions which undergo conversion.
+
+* As a user, when I request an object, and webhook-conversion fails, I get an error message that helps me understand the problem.
+
+* As an API machinery code maintainer, this change should not make the API machinery code harder to maintain
+
+* As a cluster owner, when I upgrade to the version of Kubernetes that supports CRD multiple versions, but I don't use the new feature, my existing CRDs work fine. I can roll back to the previous version without any special action.
+
+
+## Summary of Changes
+
+1. A CRD object now represents a group/kind with one or more versions.
+
+2. The CRD API (CustomResourceDefinitionSpec) is extended as follows:
+
+ 1. It has a place to register 1 webhook.
+
+ 2. It holds multiple "versions".
+
+ 3. Some fields which were part of the .spec are now per-version; namely Schema, Subresources, and AdditionalPrinterColumns.
+
+3. A Webhook Host is used to do conversion for a CRD.
+
+ 4. CRD authors will need to write a Webhook Host that accepts any version and returns any version.
+
+ 5. Toolkits like kube-builder and operator-sdk are expected to provide flows to assist users to generate Webhook Hosts.
+
+
+## Detailed Design
+
+
+### CRD API Changes
+
+The CustomResourceDefinitionSpec is extended to have a new section where webhooks are defined:
+
+```golang
+// CustomResourceDefinitionSpec describes how a user wants their resource to appear
+type CustomResourceDefinitionSpec struct {
+ Group string
+ Version string
+ Names CustomResourceDefinitionNames
+ Scope ResourceScope
+ // This is optional and corresponds to the first version in the versions list
+ Validation *CustomResourceValidation
+ // Optional, corresponds to the first version in the versions list
+ Subresources *CustomResourceSubresources
+ Versions []CustomResourceDefinitionVersion
+ // Optional, and corresponds to the first version in the versions list
+ AdditionalPrinterColumns []CustomResourceColumnDefinition
+
+ Conversion *CustomResourceConversion
+}
+
+type CustomResourceDefinitionVersion struct {
+ Name string
+ Served bool
+ Storage bool
+ // These three fields should not be set for first item in Versions list
+ Schema *JSONSchemaProp
+ Subresources *CustomResourceSubresources
+ AdditionalPrinterColumns []CustomResourceColumnDefinition
+}
+
+type CustomResourceConversion struct {
+ // Conversion strategy, either "nop" or "webhook". If webhook is set, Webhook field is required.
+ Strategy string
+
+ // Additional information for external conversion if strategy is set to external
+ // +optional
+ Webhook *CustomResourceConversionWebhook
+}
+
+type CustomResourceConversionWebhook struct {
+ // ClientConfig defines how to communicate with the webhook. This is the same config used for validating/mutating webhooks.
+ ClientConfig WebhookClientConfig
+}
+```
+
+### Defaulting
+
+In case there is no versions list, a single version with values defaulted to the top level version will be created. That means a single version with a name set to spec.version.
+All newly added per version fields (schema, additionalPrinterColumns or subresources) will be defaulted to the corresponding top level field except for the first version in the list that will remain empty.
+
+
+### Validation
+
+To keep backward compatibility, the top level fields (schema, additionalPrinterColumns or subresources) stay the same and remain the source of truth for the first (top) version. The first item in the versions list must not set any of those fields. The plan is to use a unified version list for v1.
+
+
+### Support Level
+
+The feature will be alpha in the first implementation and will have a feature gate that is defaulted to false. The roll-back story with a feature gate is much clearer. If we have the feature as alpha in kubernetes release Y (>X, where the feature is missing) and we make it beta in kubernetes release Z, it is not safe to use the feature and downgrade from Y to X, but the feature is alpha in Y which is fine. It is safe to downgrade from Z to Y (given that we enable the feature gate in Y) and that is desirable as the feature is beta in Z.
+
+
+### Rollback
+
+Users that need to rollback to version X (but may currently be running version Y > X) of apiserver should not use CRD Webhook Conversion if X is not a version that supports these features. If a user were to create a CRD that uses CRD Webhook Conversion and then rolls back to version X that does not support conversion then the following would happen:
+
+1. The stored custom resources in etcd will not be deleted.
+
+2. Any clients that try to get the custom resources will get a 500 (internal server error). This is distinguishable from a deleted object for get, and the list operation will also fail. That means the CRD is not served at all, and clients that try to garbage collect related resources to missing CRs should be aware of this.
+
+3. Any client (e.g. controller) that tries to list the resource (in preparation for watching it) will get a 500 (this is distinguishable from an empty list or a 404).
+
+4. If the user rolls forward again, then custom resources will be served again.
+
+If a user does not use the webhook feature but uses the versioned schema, additionalPrinterColumns, and/or subresources and rolls back to a version that does not support them per version, any value set per version will be ignored and only values in the top level spec.* will be honored.
+
+Please note that any of the fields added in this design that are not supported in previous kubernetes releases can be removed on an update operation (e.g. status update). A kubernetes release that defines these types but gates them with an alpha feature gate, however, can keep these fields but ignore their value.
+
+### Webhook Request/Response
+
+The Conversion request and response would be similar to [Admission webhooks](https://github.com/kubernetes/kubernetes/blob/951962512b9cfe15b25e9c715a5f33f088854f97/staging/src/k8s.io/api/admission/v1beta1/types.go#L29). The AdmissionReview seems to be redundant but used by other Webhook APIs and added here for consistency.
+
+```golang
+// ConversionReview describes a conversion request/response.
+type ConversionReview struct {
+ metav1.TypeMeta
+ // Request describes the attributes for the conversion request.
+ // +optional
+ Request *ConversionRequest
+ // Response describes the attributes for the conversion response.
+ // +optional
+ Response *ConversionResponse
+}
+
+type ConversionRequest struct {
+ // UID is an identifier for the individual request/response. Useful for logging.
+ UID types.UID
+ // The version to convert given object to. E.g. "stable.example.com/v1"
+ APIVersion string
+ // Object is the CRD object to be converted.
+ Object runtime.RawExtension
+}
+
+type ConversionResponse struct {
+ // UID is an identifier for the individual request/response.
+ // This should be copied over from the corresponding ConversionRequest.
+ UID types.UID
+ // ConvertedObject is the converted version of request.Object.
+ ConvertedObject runtime.RawExtension
+}
+```
+
+If the conversion fails, the webhook should fail the HTTP request with a proper error code and message that will be used to create a status error for the original API caller.
+
+
+### Monitorability
+
+There should be prometheus variables to show:
+
+* CRD conversion latency
+ * Overall
+ * By webhook name
+ * By request (sum of all conversions in a request)
+ * By CRD
+* Conversion Failures count
+ * Overall
+ * By webhook name
+ * By CRD
+* Timeout failures count
+ * Overall
+ * By webhook name
+ * By CRD
+
+Adding a webhook dynamically adds a key to a map-valued prometheus metric. Webhook host process authors should consider how to make their webhook host monitorable: while eventually we hope to offer a set of best practices around this, for the initial release we won’t have requirements here.
+
+
+### Error Messages
+
+When a conversion webhook fails, e.g. for the GET operation, then the error message from the apiserver to its client should reflect that conversion failed and include additional information to help debug the problem. The error message and HTTP error code returned by the webhook should be included in the error message API server returns to the user. For example:
+
+```bash
+$ kubectl get mykind somename
+error on server: conversion from stored version v1 to requested version v2 for somename: "408 request timeout" while calling service "mywebhookhost.somens.cluster.local:443"
+```
+
+
+For operations that need more than one conversion (e.g. LIST), no partial result will be returned. Instead the whole operation will fail the same way with detailed error messages. To help debugging these kind of operations, the UID of the first failing conversion will also be included in the error message.
+
+
+### Caching
+
+No new caching is planned as part of this work, but the API Server may in the future cache webhook POST responses.
+
+Most API operations are reads. The most common kind of read is a watch. All watched objects are cached in memory. For CRDs, the cache
+is per version. That is the result of having one [REST store object](https://github.com/kubernetes/kubernetes/blob/3cb771a8662ae7d1f79580e0ea9861fd6ab4ecc0/staging/src/k8s.io/apiextensions-apiserver/pkg/registry/customresource/etcd.go#L72) per version which
+was an arbitrary design choice but would be required for better caching with webhook conversion. In this model, each GVK is cached, regardless of whether some GVKs share storage. Thus, watches do not cause conversion. So, conversion webhooks will not add overhead to the watch path. Watch cache is per api server and eventually consistent.
+
+Non-watch reads are also cached (if requested resourceVersion is 0 which is true for generated informers by default, but not for calls like `kubectl get ...`, namespace cleanup, etc). The cached objects are converted and per version (TODO: fact check). So, conversion webhooks will not add overhead here too.
+
+If in the future this proves to be a performance problem, we might need to add caching later. The Authorization and Authentication webhooks already use a simple scheme with APIserver-side caching and a single TTL for expiration. This has worked fine, so we can repeat this process. It does not require Webhook hosts to be aware of the caching.
+
+
+## Examples
+
+
+### Example of Writing Conversion Webhook
+
+Data model for v1:
+
+|data model for v1|
+|-----------------|
+```yaml
+properties:
+ spec:
+ properties:
+ cronSpec:
+ type: string
+ image:
+ type: string
+```
+
+|data model for v2|
+|-----------------|
+```yaml
+properties:
+ spec:
+ properties:
+ min:
+ type: string
+ hour:
+ type: string
+ dayOfMonth:
+ type: string
+ month:
+ type: string
+ dayOfWeek:
+ type: string
+ image:
+ type: string
+```
+
+
+Both schemas can hold the same data (assuming the string format for V1 was a valid format).
+
+|crontab_conversion.go|
+|---------------------|
+
+```golang
+import .../types/v1
+import .../types/v2
+
+// Actual conversion methods
+
+func convertCronV1toV2(cronV1 *v1.Crontab) (*v2.Crontab, error) {
+ items := strings.Split(cronV1.spec.cronSpec, " ")
+ if len(items) != 5 {
+ return nil, fmt.Errorf("invalid spec string, needs five parts: %s", cronV1.spec.cronSpec)
+ }
+ return &v2.Crontab{
+ ObjectMeta: cronV1.ObjectMeta,
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "stable.example.com/v2",
+ Kind: cronV1.Kind,
+ },
+ spec: v2.CrontabSpec{
+ image: cronV1.spec.image,
+ min: items[0],
+ hour: items[1],
+ dayOfMonth: items[2],
+ month: items[3],
+ dayOfWeek: items[4],
+ },
+ }, nil
+
+}
+
+func convertCronV2toV1(cronV2 *v2.Crontab) (*v1.Crontab, error) {
+ cronspec := cronV2.spec.min + " "
+ cronspec += cronV2.spec.hour + " "
+ cronspec += cronV2.spec.dayOfMonth + " "
+ cronspec += cronV2.spec.month + " "
+ cronspec += cronV2.spec.dayOfWeek
+ return &v1.Crontab{
+ ObjectMeta: cronV2.ObjectMeta,
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "stable.example.com/v1",
+ Kind: cronV2.Kind,
+ },
+ spec: v1.CrontabSpec{
+ image: cronV2.spec.image,
+ cronSpec: cronspec,
+ },
+ }, nil
+}
+
+// The rest of the file can go into an auto generated framework
+
+func serveCronTabConversion(w http.ResponseWriter, r *http.Request) {
+ request, err := readConversionRequest(r)
+ if err != nil {
+ reportError(w, err)
+ }
+ response := ConversionResponse{}
+ response.UID = request.UID
+ converted, err := convert(request.Object, request.APIVersion)
+ if err != nil {
+ reportError(w, err)
+ }
+ response.ConvertedObject = *converted
+ writeConversionResponse(w, response)
+}
+
+func convert(in runtime.RawExtension, version string) (*runtime.RawExtension, error) {
+ inApiVersion, err := extractAPIVersion(in)
+ if err != nil {
+ return nil, err
+ }
+ switch inApiVersion {
+ case "stable.example.com/v1":
+ var cronV1 v1Crontab
+ if err := json.Unmarshal(in.Raw, &cronV1); err != nil {
+ return nil, err
+ }
+ switch version {
+ case "stable.example.com/v1":
+ // This should not happen as the API server will not call the webhook in this case
+ return &in, nil
+ case "stable.example.com/v2":
+ cronV2, err := convertCronV1toV2(&cronV1)
+ if err != nil {
+ return nil, err
+ }
+ raw, err := json.Marshal(cronV2)
+ if err != nil {
+ return nil, err
+ }
+ return &runtime.RawExtension{Raw: raw}, nil
+ }
+ case "stable.example.com/v2":
+ var cronV2 v2Crontab
+ if err := json.Unmarshal(in.Raw, &cronV2); err != nil {
+ return nil, err
+ }
+ switch version {
+ case "stable.example.com/v2":
+ // This should not happen as the API server will not call the webhook in this case
+ return &in, nil
+ case "stable.example.com/v1":
+ cronV1, err := convertCronV2toV1(&cronV2)
+ if err != nil {
+ return nil, err
+ }
+ raw, err := json.Marshal(cronV1)
+ if err != nil {
+ return nil, err
+ }
+ return &runtime.RawExtension{Raw: raw}, nil
+ }
+ default:
+ return nil, fmt.Errorf("invalid conversion fromVersion requested: %s", inApiVersion)
+ }
+ return nil, fmt.Errorf("invalid conversion toVersion requested: %s", version)
+}
+
+func extractAPIVersion(in runtime.RawExtension) (string, error) {
+ object := unstructured.Unstructured{}
+ if err := object.UnmarshalJSON(in.Raw); err != nil {
+ return "", err
+ }
+ return object.GetAPIVersion(), nil
+}
+```
+
+Note: not all code is shown for running a web server.
+
+Note: some of this is boilerplate that we expect tools like Kubebuilder will handle for the user.
+
+Also some appropriate tests, most importantly round trip test:
+
+|crontab_conversion_test.go|
+|-|
+
+```golang
+func TestRoundTripFromV1ToV2(t *testing.T) {
+ testObj := v1.Crontab{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "my-new-cron-object",
+ },
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "stable.example.com/v1",
+ Kind: "CronTab",
+ },
+ spec: v1.CrontabSpec{
+ image: "my-awesome-cron-image",
+ cronSpec: "* * * * */5",
+ },
+ }
+ testRoundTripFromV1(t, testObj)
+}
+
+func testRoundTripFromV1(t *testing.T, v1Object v1.CronTab) {
+ v2Object, err := convertCronV1toV2(v1Object)
+ if err != nil {
+ t.Fatalf("failed to convert v1 crontab to v2: %v", err)
+ }
+ v1Object2, err := convertCronV2toV1(v2Object)
+ if err != nil {
+ t.Fatalf("failed to convert v2 crontab to v1: %v", err)
+ }
+ if !reflect.DeepEqual(v1Object, v1Object2) {
+ t.Errorf("round tripping failed for v1 crontab. v1Object: %v, v2Object: %v, v1ObjectConverted: %v",
+ v1Object, v2Object, v1Object2)
+ }
+}
+```
+
+## Example of Updating CRD from one to two versions
+
+This example uses some files from previous section.
+
+**Step 1**: Start from a CRD with only one version
+
+|crd1.yaml|
+|-|
+
+```yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: crontabs.stable.example.com
+spec:
+ group: stable.example.com
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ properties:
+ spec:
+ properties:
+ cronSpec:
+ type: string
+ image:
+ type: string
+ scope: Namespaced
+ names:
+ plural: crontabs
+ singular: crontab
+ kind: CronTab
+ shortNames:
+ - ct
+```
+
+And create it:
+
+```bash
+kubectl create -f crd1.yaml
+```
+
+(If you have an existing CRD installed prior to the version of Kubernetes that supports the "versions" field, then you may need to move version field to a single item in the list of versions or just try to touch the CRD after upgrading to the new Kubernetes version which will result in the versions list being defaulted to a single item equal to the top level spec values)
+
+**Step 2**: Create a CR within that one version:
+
+|cr1.yaml|
+|-|
+```yaml
+
+apiVersion: "stable.example.com/v1"
+kind: CronTab
+metadata:
+ name: my-new-cron-object
+spec:
+ cronSpec: "* * * * */5"
+ image: my-awesome-cron-image
+```
+
+And create it:
+
+```bash
+kubectl create -f cr1.yaml
+```
+
+**Step 3**: Decide to introduce a new version of the API.
+
+**Step 3a**: Write a new OpenAPI data model for the new version (see previous section). Use of a data model is not required, but it is recommended.
+
+**Step 3b**: Write conversion webhook and deploy it as a service named `crontab_conversion`
+
+See the "crontab_conversion.go" file in the previous section.
+
+**Step 3c**: Update the CRD to add the second version.
+
+Do this by adding a new item to the "versions" list, containing the new data model:
+
+|crd2.yaml|
+|-|
+```yaml
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: crontabs.stable.example.com
+spec:
+ group: stable.example.com
+ versions:
+ - name: v1
+ served: true
+ storage: false
+ schema:
+ properties:
+ spec:
+ properties:
+ cronSpec:
+ type: string
+ image:
+ type: string
+ - name: v2
+ served: true
+ storage: true
+ schema:
+ properties:
+ spec:
+ properties:
+ min:
+ type: string
+ hour:
+ type: string
+ dayOfMonth:
+ type: string
+ month:
+ type: string
+ dayOfWeek:
+ type: string
+ image:
+ type: string
+ scope: Namespaced
+ names:
+ plural: crontabs
+ singular: crontab
+ kind: CronTab
+ shortNames:
+ - ct
+ conversion:
+ strategy: external
+ webhook:
+ client_config:
+ namespace: crontab
+ service: crontab_conversion
+ path: /crontab_convert
+```
+
+And apply it:
+
+```bash
+kubectl apply -f crd2.yaml
+```
+
+**Step 4**: add a new CR in v2:
+
+|cr2.yaml|
+|-|
+```yaml
+
+apiVersion: "stable.example.com/v2"
+kind: CronTab
+metadata:
+ name: my-second-cron-object
+spec:
+ min: "*"
+ hour: "*"
+ dayOfMonth: "*"
+ dayOfWeek: "*/5"
+ month: "*"
+ image: my-awesome-cron-image
+```
+
+And create it:
+
+```bash
+kubectl create -f cr2.yaml
+```
+
+**Step 5**: storage now has two custom resources in two different versions. To downgrade to the previous CRD, one can apply crd1.yaml but that will fail as the status.storedVersions has both v1 and v2 and those cannot be removed from the spec.versions list. To downgrade, first create a crd2-b.yaml file that sets v1 as the storage version and apply it, then follow "*Upgrade existing objects to a new stored version*" in [this document](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definition-versioning/). After all CRs in the storage have the v1 version, you can apply crd1.yaml.
+
+**Step 5 alternative**: create a crd1-b.yaml that has v2 but not served.
+
+|crd1-b.yaml|
+|-|
+```yaml
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: crontabs.stable.example.com
+spec:
+ group: stable.example.com
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ schema:
+ properties:
+ spec:
+ properties:
+ cronSpec:
+ type: string
+ image:
+ type: string
+ - name: v2
+ served: false
+ storage: false
+ scope: Namespaced
+ names:
+ plural: crontabs
+ singular: crontab
+ kind: CronTab
+ shortNames:
+ - ct
+ conversion:
+ strategy: external
+ webhook:
+ client_config:
+ namespace: crontab
+ service: crontab_conversion
+ path: /crontab_convert
+```
+
+## Alternatives Considered
+
+Other than webhook conversion, a declarative conversion was also considered and discussed. The main operator that was being discussed was Rename/Move. This section explains why Webhooks are chosen over declarative conversion. This does not mean the declarative approach will not be supported, but the webhook would be the first conversion method kubernetes supports.
+
+### Webhooks vs Declarative
+
+The table below compares webhook vs declarative in details.
+
+<table>
+ <tr>
+ <td></td>
+ <td>Webhook</td>
+ <td>Declarative</td>
+ </tr>
+ <tr>
+ <td>1. Limitations</td>
+ <td>There is no limitation on the type of conversion CRD author can do.</td>
+ <td>Very limited set of conversions will be provided.</td>
+ </tr>
+ <tr>
+ <td>2. User Complexity</td>
+ <td>Harder to implement and the author needs to run an http server. This can be made simpler using tools such as kube-builder.</td>
+ <td>Easy to use as they are in yaml configuration file.</td>
+ </tr>
+ <tr>
+ <td>3. Design Complexity</td>
+ <td>Because the API server calls into an external webhook, there is no need to design a specific conversions.</td>
+ <td>Designing of declarative conversions can be tricky, especially if they are changing the value of fields. Challenges are: Meeting the round-trip-ability requirement, arguing the usefulness of the operator and keeping it simple enough for a declarative system.</td>
+ </tr>
+ <tr>
+ <td>4. Performance</td>
+ <td>Several calls to webhook for one operation (e.g. Apply) might hit performance issues. A monitoring metric helps measure this for later improvements that can be done through batch conversion.</td>
+ <td>Implemented in API Server directly thus there is no performance concerns.</td>
+ </tr>
+ <tr>
+ <td>5. User mistakes</td>
+ <td>Users have freedom to implement any kind of conversion which may not conform with our API convention (e.g. round-trip-ability. If the conversion is not revertible, old clients may fail and downgrade will also be at risk).</td>
+ <td>Keeping the conversion operators sane and sound would not be the user’s problem. For things like rename/move there is already a design that keeps round-trip-ability but that could be tricky for other operations.</td>
+ </tr>
+ <tr>
+ <td>6. Popularity</td>
+ <td>Because of the freedom in conversion of webhooks, they probably would be more popular</td>
+ <td>Limited set of declarative operators make it a safer but less popular choice at least in the early stages of CRD development</td>
+ </tr>
+ <tr>
+ <td>7. CRD Development Cycles</td>
+ <td>Fit well into the story of CRD development of starting with blob store CRDs, then add Schema, then Add webhook conversions for the freedom of conversion the move as much possible to declarative for safer production.</td>
+ <td>Comes after Webhooks in the development cycles of CRDs</td>
+ </tr>
+</table>
+
+
+Webhook conversion has less limitation for the authors of APIs using CRD which is desirable especially in the early stages of development. Although there is a chance of user mistakes and also it may look more complex to implement a webhook, those can be relieved using sets of good tools/libraries such as kube-builder. Overall, Webhook conversion is the clear winner here. Declarative approach may be considered at a later stage as an alternative but need to be carefully designed.
+
+
+### Caching
+
+* use HTTP caching conventions with Cache-Control, Etags, and a unique URL for each different request). This requires more complexity for the webhook author. This change could be considered as part of an update to all 5 or so kinds of webhooks, but not justified for just this one kind of webhook.
+
+* The CRD object could have a "conversionWebhookVersion" field which the user can increment/change when upgrading/downgrading the webhook to force invalidation of cached objects.
+
+
+## Advice to Users
+
+* A proper webhook host implementation should accept every supported version as input and as output version.
+
+* It should also be able to round trip between versions. E.g. converting an object from v1 to v2 and back to v1 should yield the same object.
+
+* Consider testing your conversion webhook with a fuzz tester that generates random valid objects.
+
+* The webhook should always give the same response for the same request, which allows the API server to potentially cache the responses in future (modulo bug fixes; when an update is pushed that fixes a bug in the conversion operation it might not take effect for a few minutes).
+
+* If you need to add a new field, just add it. You don't need new schema to add a field.
+
+* Webhook Hosts should be side-effect free.
+
+* Webhook Hosts should not expect to see every conversion operation. Some may be cached in the future.
+
+* Toolkits like KubeBuilder and OperatorKit may assist users in using this new feature by:
+
+ * having a place in their file hierarchy to define multiple schemas for the same kind.
+
+ * having a place in their code templates to define a conversion function.
+
+ * generating a full Webhook Host from a conversion function.
+
+ * helping users create tests by writing directories containing sample yamls of an object in various versions.
+
+ * using fuzzing to generate random valid objects and checking if they convert.
+
+## Test and Documentation Plan
+
+* Test the upgrade/rollback scenario below.
+
+* Test conversion, refer to the test case section.
+
+* Document CRD conversion and best practices for webhook conversion
+
+* Document to CRD users how to upgrade and downgrade (changing storage version dance, and changes to CRD stored tags).
+
+### Upgrade/Rollback Scenarios
+
+Scenario 1: Upgrading an Operator to have more versions.
+
+* Detect if the cluster version supports webhook conversion
+
+ * Helm chart can require e.g. v1.12 of a Kubernetes API Server.
+
+Scenario 2: Rolling back to a previous version of API Server that does not support CRD Conversions
+
+* I have a cluster
+
+ * I use apiserver v1.11.x, which supports multiple no-conversion-versions of a CRD
+
+* I start to use CRDs
+
+ * I install helm chart "Foo-Operator", which installs a CRD for resource Foo, with 1 version called v1beta1.
+
+ * This uses the old "version" and "
+
+ * I create some Foo resources.
+
+* I upgrade apiserver to v1.12.x
+
+ * version-conversion now supported.
+
+* I upgrade the Foo-Operator chart.
+
+ * This changes the CRD to have two versions, v1beta1 and v1beta2.
+
+ * It installs a Webhook Host to convert them.
+
+ * Assume: v1beta1 is still the storage version.
+
+* I start using multiple versions, so that the CRs are now stored in a mix of versions.
+
+* I downgrade kube-apiserver
+
+ * Emergency happens, I need to downgrade to v1.11.x. Conversion won't be possible anymore.
+
+ * Downgrade
+
+ * Any call that needs conversion should fail at this stage (we need to patch 1.11 for this, see issue [#65790](https://github.com/kubernetes/kubernetes/issues/65790))
+
+### Test Cases
+
+* Updating existing CRD to use multiple versions with conversion
+
+ * Define a CRD with one version.
+
+ * Create stored CRs.
+
+ * Update the CRD object to add another (non-storage) version with a conversion webhook
+
+ * Existing CRs are not harmed
+
+ * Can get existing CRs via new api, conversion webhook should be called
+
+ * Can create new CRs with new api, conversion webhook should be called
+
+ * Access new CRs with new api, conversion webhook should not be called
+
+ * Access new CRs with old api, conversion webhook should be called
+
+## Development Plan
+
+Google is able to staff development, test, review, and documentation. Help welcome, too, esp. reviewing.
+
+Not in scope for this work:
+
+* Including CRDs to aggregated OpenAPI spec (fka swagger.json).
+
+* Apply for CRDs
+
+* Make CRDs powerful enough to convert any or all core types to CRDs (in line with that goal, but this is just a step towards it).
+
+### Work items
+
+* Add APIs for conversion webhooks in CustomResourceDefinition type.
+
+* Support multi-version (used to be called validation) Schema
+
+* Support multi-version subresources and AdditionalPrintColumns
+
+* Add a Webhook converter call as a CRD converter (refactor conversion code as needed)
+
+* Ensure able to monitor latency from webhooks. See Monitorability section
+
+* Add Upgrade/Downgrade tests
+
+* Add public documentation
diff --git a/contributors/design-proposals/storage/attacher-detacher-refactor-for-local-storage.md b/contributors/design-proposals/storage/attacher-detacher-refactor-for-local-storage.md
new file mode 100644
index 00000000..0833aa0a
--- /dev/null
+++ b/contributors/design-proposals/storage/attacher-detacher-refactor-for-local-storage.md
@@ -0,0 +1,281 @@
+---
+
+title: Attacher/Detacher refactor for local storage
+
+authors:
+- "@NickrenREN"
+
+owning-sig: sig-storage
+
+participating-sigs:
+ - nil
+
+reviewers:
+ - "@msau42"
+ - "@jsafrane"
+
+approvers:
+ - "@jsafrane"
+ - "@msau42"
+ - "@saad-ali"
+
+editor: TBD
+
+creation-date: 2018-07-30
+
+last-updated: 2018-07-30
+
+status: provisional
+
+---
+
+## Table of Contents
+ * [Table of Contents](#table-of-contents)
+* [Summary](#summary)
+* [Motivation](#motivation)
+ * [Goals](#goals)
+ * [Non-Goals](#non-goals)
+* [Proposal](#proposal)
+* [Implementation](#implementation)
+ * [Volume plugin interface change](#volume-plugin-interface-change)
+  * [MountVolume/UnmountDevice generation function change](#mountvolumeunmountdevice-generation-function-change)
+ * [Volume plugin change](#volume-plugin-change)
+* [Future](#future)
+
+## Summary
+
+Today, the workflow for a volume to be used by a pod is:
+
+- attach a remote volume to the node instance (if it is attachable)
+- wait for the volume to be attached (if it is attachable)
+- mount the device to a global path (if it is attachable)
+- mount the global path to a pod directory
+
+It is ok for remote block storage plugins which have a remote attach api, such as `GCE PD`, `AWS EBS`
+and remote fs storage plugins such as `NFS` and `Cephfs`.
+
+But it is not so good for plugins which need local attach such as `fc`, `iscsi` and `RBD`.
+
+It is not so good for local storage either, which is not attachable but needs `MountDevice`.
+
+
+## Motivation
+
+### Goals
+
+ Update Attacher/Detacher interfaces for local storage
+
+### Non-Goals
+
+ Update `fc`, `iscsi` and `RBD` implementation according to the new interfaces
+
+## Proposal
+
+Here we propose to only update the Attacher/Detacher interfaces for local storage.
+We may expand it in future to `iscsi`, `RBD` and `fc`, if we figure out how to prevent multiple local attach without implementing attacher interface.
+
+## Implementation
+
+### Volume plugin interface change
+
+We can create a new interface `DeviceMounter`, move `GetDeviceMountPath` and `MountDevice` from `Attacher` to it.
+
+We can put `DeviceMounter` in `Attacher` which means any one who implements the `Attacher` interface must implement `DeviceMounter`.
+
+```
+// Attacher can attach a volume to a node.
+type Attacher interface {
+ DeviceMounter
+
+ // Attaches the volume specified by the given spec to the node with the given Name.
+ // On success, returns the device path where the device was attached on the
+ // node.
+ Attach(spec *Spec, nodeName types.NodeName) (string, error)
+
+ // VolumesAreAttached checks whether the list of volumes still attached to the specified
+ // node. It returns a map which maps from the volume spec to the checking result.
+ // If an error is occurred during checking, the error will be returned
+ VolumesAreAttached(specs []*Spec, nodeName types.NodeName) (map[*Spec]bool, error)
+
+ // WaitForAttach blocks until the device is attached to this
+ // node. If it successfully attaches, the path to the device
+ // is returned. Otherwise, if the device does not attach after
+ // the given timeout period, an error will be returned.
+ WaitForAttach(spec *Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error)
+}
+
+// DeviceMounter can mount a block volume to a global path.
+type DeviceMounter interface {
+ // GetDeviceMountPath returns a path where the device should
+ // be mounted after it is attached. This is a global mount
+ // point which should be bind mounted for individual volumes.
+ GetDeviceMountPath(spec *Spec) (string, error)
+
+ // MountDevice mounts the disk to a global path which
+ // individual pods can then bind mount
+ // Note that devicePath can be empty if the volume plugin does not implement any of Attach and WaitForAttach methods.
+ MountDevice(spec *Spec, devicePath string, deviceMountPath string) error
+}
+
+```
+
+Note: we also need to make sure that if our plugin implements the `DeviceMounter` interface,
+then executing mount operation from multiple pods referencing the same volume in parallel should be avoided,
+even if it does not implement the `Attacher` interface.
+
+Since `NestedPendingOperations` can achieve this by setting the same volumeName and same or empty podName in one operation,
+we just need to add another check in `MountVolume`: check if the volume is DeviceMountable.
+
+We also need to create another new interface `DeviceUnmounter`, and move `UnmountDevice` to it.
+```
+// Detacher can detach a volume from a node.
+type Detacher interface {
+ DeviceUnmounter
+
+ // Detach the given volume from the node with the given Name.
+ // volumeName is name of the volume as returned from plugin's
+ // GetVolumeName().
+ Detach(volumeName string, nodeName types.NodeName) error
+}
+
+// DeviceUnmounter can unmount a block volume from the global path.
+type DeviceUnmounter interface {
+ // UnmountDevice unmounts the global mount of the disk. This
+ // should only be called once all bind mounts have been
+ // unmounted.
+ UnmountDevice(deviceMountPath string) error
+}
+```
+Accordingly, we need to create a new interface `DeviceMountableVolumePlugin` and move `GetDeviceMountRefs` to it.
+```
+// AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment
+// to a node before mounting.
+type AttachableVolumePlugin interface {
+ DeviceMountableVolumePlugin
+ NewAttacher() (Attacher, error)
+ NewDetacher() (Detacher, error)
+}
+
+// DeviceMountableVolumePlugin is an extended interface of VolumePlugin and is used
+// for volumes that requires mount device to a node before binding to volume to pod.
+type DeviceMountableVolumePlugin interface {
+ VolumePlugin
+ NewDeviceMounter() (DeviceMounter, error)
+	NewDeviceUnmounter() (DeviceUnmounter, error)
+ GetDeviceMountRefs(deviceMountPath string) ([]string, error)
+}
+```
+
+### MountVolume/UnmountDevice generation function change
+
+Currently we will check if the volume plugin is attachable in `GenerateMountVolumeFunc`, if it is, we need to call `WaitForAttach`, `GetDeviceMountPath` and `MountDevice` first, and then set up the volume.
+
+After the refactor, we can split that into three sections: check if volume is attachable, check if it is deviceMountable and set up the volume.
+```
+devicePath := volumeToMount.DevicePath
+if volumeAttacher != nil {
+ devicePath, err = volumeAttacher.WaitForAttach(
+ volumeToMount.VolumeSpec, devicePath, volumeToMount.Pod, waitForAttachTimeout)
+ if err != nil {
+ // On failure, return error. Caller will log and retry.
+ return volumeToMount.GenerateError("MountVolume.WaitForAttach failed", err)
+ }
+ // Write the attached device path back to volumeToMount, which can be used for MountDevice.
+ volumeToMount.DevicePath = devicePath
+}
+
+if volumeDeviceMounter != nil {
+ deviceMountPath, err :=
+ volumeDeviceMounter.GetDeviceMountPath(volumeToMount.VolumeSpec)
+ if err != nil {
+ // On failure, return error. Caller will log and retry.
+ return volumeToMount.GenerateError("MountVolume.GetDeviceMountPath failed", err)
+ }
+	err = volumeDeviceMounter.MountDevice(volumeToMount.VolumeSpec, devicePath, deviceMountPath)
+ if err != nil {
+ // On failure, return error. Caller will log and retry.
+ return volumeToMount.GenerateError("MountVolume.MountDevice failed", err)
+ }
+
+ glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.MountDevice succeeded", fmt.Sprintf("device mount path %q", deviceMountPath)))
+
+ // Update actual state of world to reflect volume is globally mounted
+ markDeviceMountedErr := actualStateOfWorld.MarkDeviceAsMounted(
+ volumeToMount.VolumeName)
+ if markDeviceMountedErr != nil {
+ // On failure, return error. Caller will log and retry.
+ return volumeToMount.GenerateError("MountVolume.MarkDeviceAsMounted failed", markDeviceMountedErr)
+ }
+}
+```
+Note that since local storage plugin will not implement the Attacher interface, we can get the device path directly from `spec.PersistentVolume.Spec.Local.Path` when we run `MountDevice`.
+
+The device unmounting operation will be executed in `GenerateUnmountDeviceFunc`, we can update the device unmounting generation function as below:
+```
+// Get DeviceMounter plugin
+deviceMountableVolumePlugin, err :=
+ og.volumePluginMgr.FindDeviceMountablePluginByName(deviceToDetach.PluginName)
+if err != nil || deviceMountableVolumePlugin == nil {
+ return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.FindDeviceMountablePluginByName failed", err)
+}
+
+volumeDeviceUnmounter, err := deviceMountableVolumePlugin.NewDeviceUnmounter()
+if err != nil {
+	return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDeviceUnmounter failed", err)
+}
+
+volumeDeviceMounter, err := deviceMountableVolumePlugin.NewDeviceMounter()
+if err != nil {
+	return volumetypes.GeneratedOperations{}, deviceToDetach.GenerateErrorDetailed("UnmountDevice.NewDeviceMounter failed", err)
+}
+
+unmountDeviceFunc := func() (error, error) {
+	deviceMountPath, err :=
+		volumeDeviceMounter.GetDeviceMountPath(deviceToDetach.VolumeSpec)
+	if err != nil {
+		// On failure, return error. Caller will log and retry.
+		return deviceToDetach.GenerateError("GetDeviceMountPath failed", err)
+	}
+	refs, err := deviceMountableVolumePlugin.GetDeviceMountRefs(deviceMountPath)
+
+	if err != nil || mount.HasMountRefs(deviceMountPath, refs) {
+		if err == nil {
+			err = fmt.Errorf("The device mount path %q is still mounted by other references %v", deviceMountPath, refs)
+		}
+		return deviceToDetach.GenerateError("GetDeviceMountRefs check failed", err)
+	}
+	// Execute unmount
+	unmountDeviceErr := volumeDeviceUnmounter.UnmountDevice(deviceMountPath)
+ if unmountDeviceErr != nil {
+ // On failure, return error. Caller will log and retry.
+ return deviceToDetach.GenerateError("UnmountDevice failed", unmountDeviceErr)
+ }
+ // Before logging that UnmountDevice succeeded and moving on,
+ // use mounter.PathIsDevice to check if the path is a device,
+ // if so use mounter.DeviceOpened to check if the device is in use anywhere
+ // else on the system. Retry if it returns true.
+ deviceOpened, deviceOpenedErr := isDeviceOpened(deviceToDetach, mounter)
+ if deviceOpenedErr != nil {
+ return nil, deviceOpenedErr
+ }
+ // The device is still in use elsewhere. Caller will log and retry.
+ if deviceOpened {
+ return deviceToDetach.GenerateError(
+ "UnmountDevice failed",
+ fmt.Errorf("the device is in use when it was no longer expected to be in use"))
+ }
+
+ ...
+
+ return nil, nil
+ }
+
+```
+
+### Volume plugin change
+
+We only need to implement the DeviceMounter/DeviceUnmounter interfaces for local storage since it is not attachable.
+And we can keep `fc`, `iscsi` and `RBD` unchanged at the first stage.
+
+## Future
+Update `iscsi`, `RBD` and `fc` volume plugins accordingly, if we figure out how to prevent multiple local attach without implementing attacher interface.
diff --git a/contributors/design-proposals/storage/volume-topology-scheduling.md b/contributors/design-proposals/storage/volume-topology-scheduling.md
index 931ef59c..e685d860 100644
--- a/contributors/design-proposals/storage/volume-topology-scheduling.md
+++ b/contributors/design-proposals/storage/volume-topology-scheduling.md
@@ -862,6 +862,10 @@ processing later in the priority and bind functions.
temporarily cache this decision in the PVC per Node.
8. Otherwise return false.
+Note that we should consider all the cases which may affect the cached predicate
+results of CheckVolumeBinding and other scheduler predicates; this will be
+explained later.
+
#### Priority
After all the predicates run, there is a reduced set of Nodes that can fit a
Pod. A new priority function will rank the remaining nodes based on the
@@ -971,8 +975,9 @@ topology-unaware, and we need to make one more pass in the scheduler after all
the PVCs are bound.
This predicate needs to remain in the default scheduler to handle the
-already-bound volumes using the old zonal labeling. It can be removed once that
-mechanism is deprecated and unsupported.
+already-bound volumes using the old zonal labeling, but must be updated to skip
+unbound PVC if StorageClass binding mode is WaitForFirstConsumer. It can be
+removed once that mechanism is deprecated and unsupported.
##### Volume Node Predicate
This is a new predicate added in 1.7 to handle the new PV node affinity. It
@@ -1009,6 +1014,85 @@ assume functions.
* Caching PVC dynamic provisioning decisions per node that the predicate had
made.
+#### Event handling
+
+##### Move pods into active queue
+When a pod is tried and determined to be unschedulable, it will be placed in
+the unschedulable queue by scheduler. It will not be scheduled until being
+moved to active queue. For volume topology scheduling, we need to move
+pods to active queue in following scenarios:
+
+- on PVC add
+
+  A pod which references nonexistent PVCs is unschedulable for now, so we need to
+  move pods to active queue when a PVC is added.
+
+- on PVC update
+
+ The proposed design has the scheduler initiating the binding transaction by
+ prebinding the PV and waiting for PV controller to finish binding and put it
+ back in the schedule queue. To achieve this, we need to move pods to active
+ queue on PVC update.
+
+- on PV add
+
+ Pods created when there are no PVs available will be stuck in unschedulable
+ queue. But unbound PVs created for static provisioning and delay binding
+ storage class are skipped in PV controller dynamic provisioning and binding
+ process, will not trigger events to schedule pod again. So we need to move
+ pods to active queue on PV add for this scenario.
+
+- on PV update
+
+ In scheduler assume process, if volume binding is required, scheduler will
+  put the pod into the unschedulable queue and wait for asynchronous volume binding
+  updates to be made. But the binding volumes worker may fail to update assumed pod
+ volume bindings due to conflicts if PVs are updated by PV controller or other
+ entities. So we need to move pods to active queue on PV update for this
+ scenario.
+
+- on Storage Class add
+
+ CheckVolumeBindingPred will fail if pod has unbound immediate PVCs. If these
+ PVCs have specified StorageClass name, creating StorageClass objects with
+ late binding for these PVCs will cause predicates to pass, so we need to move
+ pods to active queue when a StorageClass with WaitForFirstConsumer is added.
+
+##### Invalidate predicate equivalence cache
+The scheduler now has an optional [equivalence
+cache](../scheduling/scheduler-equivalence-class.md#goals) to improve
+the scheduler's scalability. We need to invalidate
+CheckVolumeBinding/NoVolumeZoneConflict predicate cached results in following
+scenarios to keep equivalence class cache up to date:
+
+- on PVC add/delete
+
+ When PVCs are created or deleted, available PVs to choose from for volume
+ scheduling may change, we need to invalidate CheckVolumeBinding predicate.
+
+- on PVC update
+
+ PVC volume binding may change on PVC update, we need to invalidate
+ CheckVolumeBinding predicate.
+
+- on PV add/delete
+
+ When PVs are created or deleted, available PVs to choose from for volume
+  scheduling will change, so we need to invalidate CheckVolumeBinding
+ predicate.
+
+- on PV update
+
+ CheckVolumeBinding predicate may cache PVs in pod binding cache. When PV got
+ updated, we should invalidate cache, otherwise assume process will fail
+ with out of sync error.
+
+- on StorageClass delete
+
+  When a StorageClass with WaitForFirstConsumer is deleted, PVCs which reference
+ this storage class will be in immediate binding mode. We need to invalidate
+ CheckVolumeBinding and NoVolumeZoneConflict.
+
#### Performance and Optimizations
Let:
* N = number of nodes
diff --git a/contributors/devel/conformance-tests.md b/contributors/devel/conformance-tests.md
index abf2085f..b1a0d361 100644
--- a/contributors/devel/conformance-tests.md
+++ b/contributors/devel/conformance-tests.md
@@ -34,6 +34,7 @@ especially in the Nucleus or Application layers as described
(example: the default list of admission plugins should not have to be tweaked for passing conformance).
- cannot rely on any binaries that are not required for the
linux kernel or for a kubelet to run (i.e. git)
+- any container images used in the test must support all architectures for which kubernetes releases are built
### Conformance Test Version Skew Policy
diff --git a/contributors/devel/cri-testing-policy.md b/contributors/devel/cri-testing-policy.md
index fb01b36e..d41de619 100644
--- a/contributors/devel/cri-testing-policy.md
+++ b/contributors/devel/cri-testing-policy.md
@@ -50,16 +50,16 @@ To publish tests results, please submit a proposal in the
briefly explaining your runtime, providing at least two maintainers, and
assigning the proposal to the leads of SIG-Node.
-These test results should be published under the `sig-node` tab, grouped by the
-runtimes, organized as follows.
+These test results should be published under the `sig-node` tab, organized
+as follows.
```
-sig-node -> sig-node-{Kubernetes-version} -> sig-node-{runtime-name} -> [page containing all test jobs]
+sig-node -> sig-node-cri-{Kubernetes-version} -> [page containing the required jobs]
```
-The `sig-node` tab only lists up to three most recent Kubernetes versions,
-including the master branch for the current release cycle, e.g.,
-`sig-node-master, sig-node-1.9, sig-node-1.8`.
+Only the last three most recent Kubernetes versions and the master branch are
+kept at any time. This is consistent with the Kubernetes release schedule and
+policy.
## Test job maintenance
diff --git a/contributors/guide/README.md b/contributors/guide/README.md
index bee0885a..f400ecc2 100644
--- a/contributors/guide/README.md
+++ b/contributors/guide/README.md
@@ -1,14 +1,16 @@
# Kubernetes Contributor Guide
-## Disclaimer
-
-Hello! This is the starting point for our brand new contributor guide, currently underway as per [issue#6102](https://github.com/kubernetes/website/issues/6102) and is in need of help.
-Please be patient, or fix a section below that needs improvement, and submit a pull request! Feel free to browse the [open issues](https://github.com/kubernetes/community/issues?q=is%3Aissue+is%3Aopen+label%3Aarea%2Fcontributor-guide) and file new ones, all feedback welcome!
+<!--
+Contributing to this document?
+Please use semantic line feeds for readability: http://rhodesmill.org/brandon/2012/one-sentence-per-line/
+-->
+This document is the single source of truth for how to contribute to the code base.
+Feel free to browse the [open issues](https://github.com/kubernetes/community/issues?q=is%3Aissue+is%3Aopen+label%3Aarea%2Fcontributor-guide) and file new ones, all feedback welcome!
# Welcome
-Welcome to Kubernetes! This document is the single source of truth for how to contribute to the code base. Please leave comments / suggestions if you find something is missing or incorrect.
+Welcome to Kubernetes!
- [Before you get started](#before-you-get-started)
- [Sign the CLA](#sign-the-cla)
@@ -49,31 +51,40 @@ Please make sure to read and observe our [Code of Conduct](https://github.com/cn
## Setting up your development environment
-If you haven’t set up your environment, please find resources [here](/contributors/devel).
+If you haven’t set up your environment, check the [developer resources](/contributors/devel).
## Community Expectations and Roles
-Kubernetes is a community project. Consequently, it is wholly dependent on its community to provide a productive, friendly and collaborative environment.
+Kubernetes is a community project.
+Consequently, it is wholly dependent on its community to provide a productive, friendly and collaborative environment.
- Read and review the [Community Expectations](community-expectations.md) for an understanding of code and review expectations.
- See [Community Membership](/community-membership.md) for a list the various responsibilities of contributor roles. You are encouraged to move up this contributor ladder as you gain experience.
# Your First Contribution
-Have you ever wanted to contribute to the coolest cloud technology? We will help you understand the organization of the Kubernetes project and direct you to the best places to get started. You'll be able to pick up issues, write code to fix them, and get your work reviewed and merged.
+Have you ever wanted to contribute to the coolest cloud technology?
+We will help you understand the organization of the Kubernetes project and direct you to the best places to get started.
+You'll be able to pick up issues, write code to fix them, and get your work reviewed and merged.
-Please be aware that due to the large number of issues our triage team deals with, we cannot offer technical support in GitHub issues. If you have questions about the development process, feel free to jump into our [Slack Channel](http://slack.k8s.io/) or join our [mailing list](https://groups.google.com/forum/#!forum/kubernetes-dev). You can also ask questions on [ServerFault](https://serverfault.com/questions/tagged/kubernetes) or [Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes). The Kubernetes team scans Stack Overflow on a regular basis and will try to ensure your questions don't go unanswered.
+Please be aware that due to the large number of issues our triage team deals with, we cannot offer technical support in GitHub issues.
+If you have questions about the development process, feel free to jump into our [Slack Channel](http://slack.k8s.io/) or join our [mailing list](https://groups.google.com/forum/#!forum/kubernetes-dev).
+You can also ask questions on [ServerFault](https://serverfault.com/questions/tagged/kubernetes) or [Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes).
+The Kubernetes team scans Stack Overflow on a regular basis and will try to ensure your questions don't go unanswered.
## Find something to work on
-Help is always welcome! For example, documentation (like the text you are reading now) can always use improvement. There's always code that can be clarified and variables or functions that can be renamed or commented. There's always a need for more test coverage.
-You get the idea - if you ever see something you think should be fixed, you should own it. Here is how you get started.
+Help is always welcome! For example, documentation (like the text you are reading now) can always use improvement. There's always code that can be clarified and variables or functions that can be renamed or commented.
+There's always a need for more test coverage.
+You get the idea - if you ever see something you think should be fixed, you should own it.
+Here is how you get started.
### Find a good first topic
-There are multiple repositories within the Kubernetes community and a full list of repositories can be found [here](https://github.com/kubernetes/).
-Each repository in the Kubernetes organization has beginner-friendly issues that provide a good first issue. For example, [kubernetes/kubernetes](https://git.k8s.io/kubernetes) has [help wanted](https://go.k8s.io/help-wanted) and [good first issue](https://github.com/kubernetes/kubernetes/labels/good%20first%20issue) labels for issues that should not need deep knowledge of the system.
-The `good first issue` label indicates that members have committed to providing extra assistance for new contributors. Read more [here](/contributors/devel/help-wanted.md).
+There are [multiple repositories](https://github.com/kubernetes/) within the Kubernetes organization.
+Each repository has beginner-friendly issues that provide a good first issue.
+For example, [kubernetes/kubernetes](https://git.k8s.io/kubernetes) has [help wanted](https://go.k8s.io/help-wanted) and [good first issue](https://github.com/kubernetes/kubernetes/labels/good%20first%20issue) labels for issues that should not need deep knowledge of the system.
+The `good first issue` label indicates that members have committed to providing [extra assistance](/contributors/devel/help-wanted.md) for new contributors.
<!-- TODO: review removing this note after 3 months or after the 1.12 release -->
Please note that while several of the repositories in the Kubernetes community have `good first issue` labels already, they are still being applied throughout the community.
@@ -81,20 +92,29 @@ Another good strategy is to find a documentation improvement, such as a missing/
#### Issue Assignment in Github
-Often, new contributors ask to be assigned an issue they are willing to take on. Unfortunately, due to GitHub limitations we can only assign issues to [org members](#community) or repo collaborators. Instead, please state in a comment that you intend to work on this issue and it will be assumed to be yours.
+Often, new contributors ask to be assigned an issue they are willing to take on. Unfortunately, due to GitHub limitations we can only assign issues to [org members](#community) or repo collaborators.
+Instead, please state in a comment that you intend to work on this issue and it will be assumed to be yours.
### Learn about SIGs
-#### Sig structure
+#### SIG structure
-You may have noticed that some repositories in the Kubernetes Organization are owned by Special Interest Groups, or SIGs. We organize the Kubernetes community into SIGs in order to improve our workflow and more easily manage what is a very large community project. The developers within each SIG have autonomy and ownership over that SIG's part of Kubernetes.
+You may have noticed that some repositories in the Kubernetes Organization are owned by Special Interest Groups, or SIGs.
+We organize the community into SIGs in order to improve our workflow and more easily manage what is a very large community project.
+The developers within each SIG have autonomy and ownership over that SIG's part of Kubernetes.
-Some SIGs also have their own `CONTRIBUTING.md` files, which may contain extra information or guidelines in addition to these general ones. These are located in the SIG-specific community directories. For example: the contributor's guide for SIG CLI is located in the *kubernetes/community* repo, as [`/sig-cli/CONTRIBUTING.md`](/sig-cli/CONTRIBUTING.md).
+Some SIGs also have their own `CONTRIBUTING.md` files, which may contain extra information or guidelines in addition to these general ones.
+These are located in the SIG-specific community directories.
+For example: the contributor's guide for SIG CLI is located in the *kubernetes/community* repo, as [`/sig-cli/CONTRIBUTING.md`](/sig-cli/CONTRIBUTING.md).
-Like everything else in Kubernetes, a SIG is an open, community, effort. Anybody is welcome to jump into a SIG and begin fixing issues, critiquing design proposals and reviewing code. SIGs have regular [video meetings](https://kubernetes.io/community/) which everyone is welcome to. Each SIG has a kubernetes slack channel that you can join as well.
+A SIG is an open, community effort.
+Anybody is welcome to jump into a SIG and begin fixing issues, critiquing design proposals and reviewing code.
+SIGs have regular [video meetings](https://kubernetes.io/community/) which everyone is welcome to.
+Each SIG has a slack channel that you can join as well.
There is an entire SIG ([sig-contributor-experience](/sig-contributor-experience/README.md)) devoted to improving your experience as a contributor.
-Contributing to Kubernetes should be easy. If you find a rough edge, let us know! Better yet, help us fix it by joining the SIG; just
+Contributing to Kubernetes should be easy.
+If you find a rough edge, let us know! Better yet, help us fix it by joining the SIG; just
show up to one of the [bi-weekly meetings](https://docs.google.com/document/d/1qf-02B7EOrItQgwXFxgqZ5qjW0mtfu5qkYIF1Hl4ZLI/edit).
#### Find a SIG that is related to your contribution
@@ -103,18 +123,24 @@ Finding the appropriate SIG for your contribution and adding a SIG label will he
For Pull Requests, the automatically assigned reviewer will add a SIG label if you haven't done so. See [Open A Pull Request](#open-a-pull-request) below.
-For Issues, we are still working on a more automated workflow. Since SIGs do not directly map onto Kubernetes subrepositories, it may be difficult to find which SIG your contribution belongs in. Here is the [list of SIGs](/sig-list.md). Determine which is most likely related to your contribution.
+For Issues, we are still working on a more automated workflow.
+Since SIGs do not directly map onto Kubernetes subrepositories, it may be difficult to find which SIG your contribution belongs in.
+Here is the [list of SIGs](/sig-list.md) so that you can determine which is most likely related to your contribution.
*Example:* if you are filing a CNI issue (that's [Container Networking Interface](https://github.com/containernetworking/cni)), you should choose the [Network SIG](http://git.k8s.io/community/sig-network). Add the SIG label in a comment like so:
```
/sig network
```
-Follow the link in the SIG name column to reach each SIGs README. Most SIGs will have a set of GitHub Teams with tags that can be mentioned in a comment on issues and pull requests for higher visibility. If you are not sure about the correct SIG for an issue, you can try SIG-contributor-experience [here](/sig-contributor-experience#github-teams), or [ask in Slack](http://slack.k8s.io/).
+Follow the link in the SIG name column to reach each SIGs README.
+Most SIGs will have a set of GitHub Teams with tags that can be mentioned in a comment on issues and pull requests for higher visibility.
+If you are not sure about the correct SIG for an issue, you can try SIG-contributor-experience [here](/sig-contributor-experience#github-teams), or [ask in Slack](http://slack.k8s.io/).
### File an Issue
-Not ready to contribute code, but see something that needs work? While the community encourages everyone to contribute code, it is also appreciated when someone reports an issue (aka problem). Issues should be filed under the appropriate Kubernetes subrepository.
+Not ready to contribute code, but see something that needs work?
+While the community encourages everyone to contribute code, it is also appreciated when someone reports an issue (aka problem).
+Issues should be filed under the appropriate Kubernetes subrepository.
Check the [issue triage guide](./issue-triage.md) for more information.
*Example:* a documentation issue should be opened to [kubernetes/website](https://github.com/kubernetes/website/issues).
@@ -123,11 +149,15 @@ Make sure to adhere to the prompted submission guidelines while opening an issue
# Contributing
-Kubernetes is open source, but many of the people working on it do so as their day job. In order to avoid forcing people to be "at work" effectively 24/7, we want to establish some semi-formal protocols around development. Hopefully, these rules make things go more smoothly. If you find that this is not the case, please complain loudly.
+Kubernetes is open source, but many of the people working on it do so as their day job.
+In order to avoid forcing people to be "at work" effectively 24/7, we want to establish some semi-formal protocols around development.
+Hopefully, these rules make things go more smoothly.
+If you find that this is not the case, please complain loudly.
-As a potential contributor, your changes and ideas are welcome at any hour of the day or night, weekdays, weekends, and holidays. Please do not ever hesitate to ask a question or send a pull request.
+As a potential contributor, your changes and ideas are welcome at any hour of the day or night, weekdays, weekends, and holidays.
+Please do not ever hesitate to ask a question or send a pull request.
-Our community guiding principles on how to create great code as a big group are found [here](/contributors/devel/collab.md).
+Check out our [community guiding principles](/contributors/devel/collab.md) on how to create great code as a big group.
Beginner focused information can be found below in [Open a Pull Request](#open-a-pull-request) and [Code Review](#code-review).
@@ -137,21 +167,24 @@ For quick reference on contributor resources, we have a handy [contributor cheat
It is best to contact your [SIG](#learn-about-sigs) for issues related to the SIG's topic. Your SIG will be able to help you much more quickly than a general question would.
-For general questions and troubleshooting, use the [kubernetes standard lines of communication](/communication.md) and work through the [kubernetes troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).
+For general questions and troubleshooting, use the [standard lines of communication](/communication.md) and work through the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).
## GitHub workflow
-To check out code to work on, please refer to [this guide](./github-workflow.md).
+To check out code to work on, please refer to [the GitHub Workflow Guide](./github-workflow.md).
## Open a Pull Request
-Pull requests are often called simply "PR". Kubernetes generally follows the standard [github pull request](https://help.github.com/articles/about-pull-requests/) process, but there is a layer of additional kubernetes specific (and sometimes SIG specific) differences:
+Pull requests are often called simply "PR".
+Kubernetes generally follows the standard [github pull request](https://help.github.com/articles/about-pull-requests/) process, but there is a layer of additional kubernetes specific (and sometimes SIG specific) differences:
- [Kubernetes-specific github workflow](pull-requests.md#the-testing-and-merge-workflow).
The first difference you'll see is that a bot will begin applying structured labels to your PR.
-The bot may also make some helpful suggestions for commands to run in your PR to facilitate review. These `/command` options can be entered in comments to trigger auto-labeling and notifications. The command reference is [here](https://go.k8s.io/bot-commands).
+The bot may also make some helpful suggestions for commands to run in your PR to facilitate review.
+These `/command` options can be entered in comments to trigger auto-labeling and notifications.
+Refer to its [command reference documentation](https://go.k8s.io/bot-commands).
Common new contributor PR issues are:
@@ -162,7 +195,8 @@ Common new contributor PR issues are:
## Code Review
-For a brief description of the importance of code review, please read [On Code Review](/contributors/guide/community-expectations.md#code-review). There are two aspects of code review: giving and receiving.
+For a brief description of the importance of code review, please read [On Code Review](/contributors/guide/community-expectations.md#code-review).
+There are two aspects of code review: giving and receiving.
To make it easier for your PR to receive reviews, consider the reviewers will need you to:
@@ -171,7 +205,8 @@ To make it easier for your PR to receive reviews, consider the reviewers will ne
* break large changes into a logical series of smaller patches which individually make easily understandable changes, and in aggregate solve a broader issue
* label PRs with appropriate SIGs and reviewers: to do this read the messages the bot sends you to guide you through the PR process
-Reviewers, the people giving the review, are highly encouraged to revisit the [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and must go above and beyond to promote a collaborative, respectful Kubernetes community. When reviewing PRs from others [The Gentle Art of Patch Review](http://sage.thesharps.us/2014/09/01/the-gentle-art-of-patch-review/) suggests an iterative series of focuses which is designed to lead new contributors to positive collaboration without inundating them initially with nuances:
+Reviewers, the people giving the review, are highly encouraged to revisit the [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and must go above and beyond to promote a collaborative, respectful community.
+When reviewing PRs from others [The Gentle Art of Patch Review](http://sage.thesharps.us/2014/09/01/the-gentle-art-of-patch-review/) suggests an iterative series of focuses which is designed to lead new contributors to positive collaboration without inundating them initially with nuances:
* Is the idea behind the contribution sound?
* Is the contribution architected correctly?
@@ -179,20 +214,23 @@ Reviewers, the people giving the review, are highly encouraged to revisit the [C
## Testing
-Testing is the responsibility of all contributors and is in part owned by all sigs, but is also coordinated by [sig-testing](/sig-testing).
-
-The main testing overview document is [here](/contributors/devel/testing.md).
+Testing is the responsibility of all contributors and is in part owned by all SIGs, but is also coordinated by [sig-testing](/sig-testing).
+Refer to the [Testing Guide](/contributors/devel/testing.md) for more information.
-There are multiple types of tests in kubernetes. The location of the test code varies with type, as do the specifics of the environment needed to successfully run the test:
+There are multiple types of tests.
+The location of the test code varies with type, as do the specifics of the environment needed to successfully run the test:
* Unit: These confirm that a particular function behaves as intended. Golang includes a native ability for unit testing via the [testing](https://golang.org/pkg/testing/) package. Unit test source code can be found adjacent to the corresponding source code within a given package. For example: functions defined in [kubernetes/cmd/kubeadm/app/util/version.go](https://git.k8s.io/kubernetes/cmd/kubeadm/app/util/version.go) will have unit tests in [kubernetes/cmd/kubeadm/app/util/version_test.go](https://git.k8s.io/kubernetes/cmd/kubeadm/app/util/version_test.go). These are easily run locally by any developer on any OS.
* Integration: These tests cover interactions of package components or interactions between kubernetes components and some other non-kubernetes system resource (eg: etcd). An example would be testing whether a piece of code can correctly store data to or retrieve data from etcd. Integration tests are stored in [kubernetes/test/integration/](https://git.k8s.io/kubernetes/test/integration). Running these can require the developer set up additional functionality on their development system.
-* End-to-end ("e2e"): These are broad tests of overall kubernetes system behavior and coherence. These are more complicated as they require a functional kubernetes cluster built from the sources to be tested. A separate document [here](/contributors/devel/e2e-tests.md) details e2e testing and test cases themselves can be found in [kubernetes/test/e2e/](https://git.k8s.io/kubernetes/test/e2e).
+* End-to-end ("e2e"): These are broad tests of overall system behavior and coherence. These are more complicated as they require a functional kubernetes cluster built from the sources to be tested. A separate [document detailing e2e testing](/contributors/devel/e2e-tests.md) and test cases themselves can be found in [kubernetes/test/e2e/](https://git.k8s.io/kubernetes/test/e2e).
* Conformance: These are a set of testcases, currently a subset of the integration/e2e tests, that the Architecture SIG has approved to define the core set of interoperable features that all Kubernetes deployments must support. For more information on Conformance tests please see the [Conformance Testing](/contributors/devel/conformance-tests.md) Document.
-Continuous integration will run these tests either as pre-submits on PRs, post-submits against master/release branches, or both. The results appear on [testgrid](https://testgrid.k8s.io).
+Continuous integration will run these tests either as pre-submits on PRs, post-submits against master/release branches, or both.
+The results appear on [testgrid](https://testgrid.k8s.io).
-sig-testing is responsible for that official infrastructure and CI. The associated automation is tracked in the [test-infra repo](https://git.k8s.io/test-infra). If you're looking to run e2e tests on your own infrastructure, [kubetest](https://git.k8s.io/test-infra/kubetest) is the mechanism.
+sig-testing is responsible for that official infrastructure and CI.
+The associated automation is tracked in the [test-infra repo](https://git.k8s.io/test-infra).
+If you're looking to run e2e tests on your own infrastructure, [kubetest](https://git.k8s.io/test-infra/kubetest) is the mechanism.
## Security
@@ -206,11 +244,15 @@ sig-testing is responsible for that official infrastructure and CI. The associa
## Issues Management or Triage
-Have you ever noticed the total number of [open issues](https://issues.k8s.io)? This number at any given time is typically high. Helping to manage or triage these open issues can be a great contribution to the Kubernetes project. This is also a great opportunity to learn about the various areas of the project. Refer to the [Kubernetes Issue Triage Guidelines](/contributors/guide/issue-triage.md) for more information.
+Have you ever noticed the total number of [open issues](https://issues.k8s.io)?
+Helping to manage or triage these open issues can be a great contribution and a great opportunity to learn about the various areas of the project.
+Refer to the [Issue Triage Guidelines](/contributors/guide/issue-triage.md) for more information.
# Community
-If you haven't noticed by now, we have a large, lively, and friendly open-source community. We depend on new people becoming members and regular code contributors, so we would like you to come join us. To find out more about our community structure, different levels of membership and code contributors, please [explore here](/community-membership.md).
+If you haven't noticed by now, we have a large, lively, and friendly open-source community.
+We depend on new people becoming members and regular code contributors, so we would like you to come join us!
+The [Community Membership Document](/community-membership.md) covers membership processes and roles.
## Communication
@@ -218,11 +260,13 @@ If you haven't noticed by now, we have a large, lively, and friendly open-source
## Events
-Kubernetes is the main focus of KubeCon + CloudNativeCon, held three times per year in China, Europe and in North America. Information about these and other community events is available on the CNCF [events](https://www.cncf.io/events/) pages.
+Kubernetes participates in KubeCon + CloudNativeCon, held three times per year in China, Europe and in North America.
+Information about these and other community events is available on the CNCF [events](https://www.cncf.io/events/) pages.
### Meetups
-We follow the general [Cloud Native Computing Foundation guidelines](https://github.com/cncf/meetups) for Meetups. You may also contact Paris Pittman via direct message on Kubernetes Slack (@paris) or by email (parispittman@google.com)
+We follow the general [Cloud Native Computing Foundation guidelines](https://github.com/cncf/meetups) for Meetups.
+You may also contact Paris Pittman via direct message on Kubernetes Slack (@paris) or by email (parispittman@google.com).
## Mentorship
@@ -232,4 +276,4 @@ Please learn about our mentoring initiatives [here](http://git.k8s.io/community/
This section includes things that need to be documented, but typical contributors do not need to interact with regularly.
-- [OWNERS files](owners.md) - The Kubernetes organizations are managed with OWNERS files, which outline which parts of the code are owned by what groups.
+- [OWNERS files](owners.md) - The Kubernetes organizations are managed with OWNERS files, which outline which parts of the code are owned by what groups. \ No newline at end of file
diff --git a/contributors/guide/owners.md b/contributors/guide/owners.md
index eb42335a..afe5f742 100644
--- a/contributors/guide/owners.md
+++ b/contributors/guide/owners.md
@@ -77,7 +77,7 @@ GitHub usernames and aliases listed in OWNERS files are case-insensitive.
## Code Review using OWNERS files
This is a simplified description of our [full PR testing and merge
-workflow](/contributors/devel/pull-requests.md#the-testing-and-merge-workflow)
+workflow](/contributors/guide/pull-requests.md#the-testing-and-merge-workflow)
that conveniently forgets about the existence of tests, to focus solely on the roles driven by
OWNERS files. Please see [below](#automation-using-owners-files) for details on how specific
aspects of this process may be configured on a per-repo basis.
diff --git a/contributors/guide/pull-requests.md b/contributors/guide/pull-requests.md
index 21a4ddf6..a2464c3a 100644
--- a/contributors/guide/pull-requests.md
+++ b/contributors/guide/pull-requests.md
@@ -105,6 +105,12 @@ If you want to solicit reviews before the implementation of your pull request is
The GitHub robots will add and remove the `do-not-merge/hold` label as you use the comment commands and the `do-not-merge/work-in-progress` label as you edit your title. While either label is present, your pull request will not be considered for merging.
+## Pull Requests and the Release Cycle
+
+If a pull request has been reviewed, but held or not approved, it might be due to the current phase in the [Release Cycle](https://git.k8s.io/sig-release/ephemera). Occasionally, a SIG may freeze their own code base when working towards a specific feature or goal that could impact other development. During this time, your pull request could remain unmerged while their release work is completed.
+
+If you feel your pull request is in this state, contact the appropriate [SIG](https://git.k8s.io/community/sig-list.md) or [SIG-Release](https://git.k8s.io/sig-release) for clarification.
+
## Comment Commands Reference
[The commands doc](https://go.k8s.io/bot-commands) contains a reference for all comment commands.
diff --git a/events/community-meeting.md b/events/community-meeting.md
index e3d8a7ab..8ec2c3a8 100644
--- a/events/community-meeting.md
+++ b/events/community-meeting.md
@@ -63,9 +63,25 @@ Also, if you are doing a live coding demo, please make sure it has a reasonable
- Ensure you are presenting from a quiet environment.
- If you run out of time while performing your demo, you may ask the audience if they would like a follow-up at a subsequent meeting. If there is enthusiastic support, the community team will help schedule a continuation.
-## SIG Updates
-
-SIGs will give a community update at least once per release cycle per the [schedule](https://docs.google.com/spreadsheets/d/1adztrJ05mQ_cjatYSnvyiy85KjuI6-GuXsRsP-T2R3k).
+## SIG Updates
+
+SIGs will give a community update at least once per release cycle per the [schedule](https://docs.google.com/spreadsheets/d/1adztrJ05mQ_cjatYSnvyiy85KjuI6-GuXsRsP-T2R3k).
+The SIG Update should mention:
+
+- Topics where input is being sought from other SIGs
+- Topics that could affect other SIGs
+- Currently active themes and goals in the SIG
+ - Broad description of future themes and goals if possible
+- Status of any notable features that are transitioning across the spectrum of incubation, alpha, beta, stable/GA, or are being deprecated
+- New or deprecated subprojects
+- Leadership position changes or updates
+- Charter status and updates, if any
+- How people can contribute, areas where help is needed
+- Any pending Kubernetes Enhancement Proposals (KEPs) or general big ideas that might warrant outside input
+- Prior 1.X.Y release patches in flight status
+- Current 1.X release targeted feature status
+
+Since you usually only have ~10 minutes, generally speaking, if something is internal only to your SIG and doesn't affect others, it doesn't need to be mentioned; people can always attend your SIG meeting for the details.
## Archives
diff --git a/generator/sig_readme.tmpl b/generator/sig_readme.tmpl
index 4062dd28..fbaafbbb 100644
--- a/generator/sig_readme.tmpl
+++ b/generator/sig_readme.tmpl
@@ -92,9 +92,9 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
{{- range .Contact.GithubTeams }}
-| @kubernetes/{{.Name}} | [link](https://github.com/orgs/kubernetes/teams/{{.Name}}) | [link](https://groups.google.com/forum/#!forum/kubernetes-{{.Name}}) | {{.Description}} |
+| @kubernetes/{{.Name}} | [link](https://github.com/orgs/kubernetes/teams/{{.Name}}) | {{.Description}} |
{{- end }}
{{ end }}
diff --git a/github-management/OWNERS b/github-management/OWNERS
index 70807ad1..cab8996b 100644
--- a/github-management/OWNERS
+++ b/github-management/OWNERS
@@ -1,7 +1,13 @@
reviewers:
+ - calebamiles
- cblecker
+ - fejta
+ - grodrigues3
+ - idvoretskyi
+ - spiffxp
approvers:
- cblecker
+ - grodrigues3
labels:
- sig/contributor-experience
- area/github-management
diff --git a/github-management/README.md b/github-management/README.md
index a15b7a66..043bb5da 100644
--- a/github-management/README.md
+++ b/github-management/README.md
@@ -14,11 +14,36 @@ These polices are overseen by the
Experience Special Interest Group.
## Guides
+- [Opening a request for assistance with GitHub](opening-a-request.md)
- [Organization Owners Guide](org-owners-guide.md)
- [Repository Creation Guidelines](kubernetes-repositories.md)
- [Setting up the CNCF CLA Check](setting-up-cla-check.md)
- [GitHub Permissions](permissions.md)
+## GitHub Administration Team
+
+In order to manage the various organizations that the Kubernetes project owns,
+we have a GitHub Administration team that is responsible for carrying out the
+various tasks.
+
+This team (**[@kubernetes/owners](https://github.com/orgs/kubernetes/teams/owners)**) is as follows:
+* Aaron Crickenberger (**[@spiffxp](https://github.com/spiffxp)**, US Pacific)
+* Caleb Miles (**[@calebamiles](https://github.com/calebamiles)**, US Pacific)
+* Christoph Blecker (**[@cblecker](https://github.com/cblecker)**, CA Pacific)
+* Erick Fejta (**[@fejta](https://github.com/fejta)**, US Pacific)
+* Garrett Rodrigues (**[@grodrigues3](https://github.com/grodrigues3)**, US Pacific)
+* Ihor Dvoretskyi (**[@idvoretskyi](https://github.com/idvoretskyi)**, UA Eastern European)
+
+This team is responsible for holding Org Owner privileges over all the active
+Kubernetes orgs, and will take action in accordance with our polices and
+procedures. All members of this team are subject to the Kubernetes
+[security embargo policy](https://git.k8s.io/sig-release/security-release-process-documentation/security-release-process.md#embargo-policy).
+
+Nominations to this team will come from the Contributor Experience SIG, and
+require confirmation by the Steering Committee before taking effect. Time zones
+and country of origin should be considered when selecting membership, to ensure
+sufficient after North American business hours and holiday coverage.
+
## Project Owned Organizations
The following organizations are currently known to be part of the Kubernetes
diff --git a/github-management/kubernetes-repositories.md b/github-management/kubernetes-repositories.md
index 7b1faf3b..c1082a6a 100644
--- a/github-management/kubernetes-repositories.md
+++ b/github-management/kubernetes-repositories.md
@@ -1,35 +1,35 @@
-## Kubernetes Repository Guidelines
+# Kubernetes Repository Guidelines
This document attempts to outline a structure for creating and associating GitHub repositories with the Kubernetes project. It also describes how and when
repositories are removed.
The document presents a tiered system of repositories with increasingly strict requirements in an attempt to provide the right level of oversight and flexibility for a variety of different projects.
-### Associated Repositories
+## Associated Repositories
Associated repositories conform to the Kubernetes community standards for a repository, but otherwise have no restrictions. Associated repositories exist solely for the purpose of making it easier for the Kubernetes community to work together. There is no implication of support or endorsement of any kind by the Kubernetes project, the goals are purely logistical.
-#### Goals
+### Goals
To facilitate contributions and collaboration from the broader Kubernetes community. Contributions to random projects with random CLAs (or DCOs) can be logistically difficult, so associated repositories should be easier.
-#### Rules
+### Rules
* Must adopt the Kubernetes Code of Conduct statement in their repo.
* All code projects use the Apache License version 2.0. Documentation repositories must use the Creative Commons License version 4.0.
* Must adopt the CNCF CLA bot automation for pull requests.
-### SIG repositories
+## SIG repositories
SIG repositories serve as temporary homes for SIG-sponsored experimental projects or prototypes of new core functionality, or as permanent homes for SIG-specific tools.
-#### Goals
+### Goals
To provide a place for SIGs to collaborate on projects endorsed by and actively worked on by members of the SIG. SIGs should be able to approve and create new repositories for SIG-sponsored projects without requiring higher level approval from a central body (e.g. steering committee or sig-architecture)
-#### Rules for new repositories
+### Rules for new repositories
* For now all repos will live in github.com/kubernetes-sigs/\<project-name\>.
* Must contain the topic for the sponsoring SIG - e.g. `k8s-sig-api-machinery`. (Added through the *Manage topics* link on the repo page.)
@@ -40,7 +40,7 @@ To provide a place for SIGs to collaborate on projects endorsed by and actively
* SIG membership must vote using lazy consensus to create a new repository
* SIG must already have identified all of their existing subprojects and code, with valid OWNERS files, in [`sigs.yaml`](https://github.com/kubernetes/community/blob/master/sigs.yaml)
-#### Rules for donated repositories
+### Rules for donated repositories
The `kubernetes-sigs` organization is primarily intended to house net-new
projects originally created in that organization. However, projects that a SIG
@@ -55,14 +55,14 @@ In addition to the requirements for new repositories, donated repositories must
* Licenses of dependencies are acceptable; project owners can ping [@caniszczyk](https://github.com/caniszczyk) for review of third party deps
* Boilerplate text across all files should attribute copyright as follows: `"Copyright <Project Authors>"` if no CLA was in place prior to donation
-### Core Repositories
+## Core Repositories
Core repositories are considered core components of Kubernetes. They are utilities, tools, applications, or libraries that are expected to be present in every or nearly every Kubernetes cluster, such as components and tools included in official Kubernetes releases. Additionally, the kubernetes.io website, k8s.io machinery, and other project-wide infrastructure will remain in the kubernetes github organization.
-#### Goals
+### Goals
Create a broader base of repositories than the existing gh/kubernetes/kubernetes so that the project can scale. Present expectations about the centrality and importance of the repository in the Kubernetes ecosystem. Carries the endorsement of the Kubernetes community.
-#### Rules
+### Rules
* Must live under `github.com/kubernetes/<project-name>`
* Must adopt the Kubernetes Code of Conduct
@@ -72,7 +72,7 @@ Create a broader base of repositories than the existing gh/kubernetes/kubernetes
* All OWNERS must be members of standing as defined by ability to vote in Kubernetes steering committee elections. in the Kubernetes community
* Repository must be approved by SIG-Architecture
-### Removing Repositories
+## Removing Repositories
As important as it is to add new repositories, it is equally important to
prune old repositories that are no longer relevant or useful.
@@ -85,72 +85,68 @@ wide processes, it ensures a rapid response to potential required fixes
contributors and users receive quick feedback on their issues and
contributions.
-#### Grounds for removal
+### Grounds for removal
+
SIG repositories and core repositories may be removed from the project if they
are deemed _inactive_. Inactive repositories are those that meet any of the
following criteria:
* There are no longer any active maintainers for the project and no
-replacements can be found.
+ replacements can be found.
* All PRs or Issues have gone un-addressed for longer than six months.
* There have been no new commits or other changes in more than a year.
+ * The contents have been folded into another actively maintained project.
Associated repositories are much more loosely associated with the Kubernetes
project and are generally not subject to removal, except under exceptional
circumstances (e.g. a code of conduct violation).
+### Procedure for removal
-#### Procedure for removal
-When a repository is set for removal, it is moved into the
-[kubernetes-retired](https://github.com/kubernetes-retired) organization.
-This maintains the
-complete record of issues, PRs and other contributions, but makes it clear
-that the repository should be considered archival, not active. We will also
-use the [github archive feature](https://help.github.com/articles/archiving-a-github-repository/) to mark the repository as archival and read-only.
-
-The decision to archive a repository will be made by SIG architecture and
-announced on the Kubernetes dev mailing list and community meeting.
+When a repository has been deemed eligible for removal, we take the following steps:
-### FAQ
+ * Ownership of the repo is transferred to the [kubernetes-retired] GitHub organization
+ * The repo description is edited to start with the phrase "[EOL]"
+ * All open issues and PRs are closed
+ * All external collaborators are removed
+ * All webhooks, apps, integrations or services are removed
+ * GitHub Pages are disabled
+ * The repo is marked as archived using [GitHub's archive feature]
+ * The removal is announced on the kubernetes-dev mailing list and community meeting
-*My project is currently in kubernetes-incubator, what is going to happen to it?*
+This maintains the complete record of issues, PRs and other contributions,
+leaves the repository read-only, and makes it clear that the repository
+should be considered retired and unmaintained.
-Nothing. We’ll grandfather existing projects and they can stay in the incubator org for as long as they want to. We expect/hope that most projects will either move out to ecosystem, or into SIG or Core repositories following the same approval process described below.
+## FAQ
+**My project is currently in kubernetes-incubator, what is going to happen to it?**
+Nothing. We’ll grandfather existing projects and they can stay in the incubator org for as long as they want to. We expect/hope that most projects will either move out to ecosystem, or into SIG or Core repositories following the same approval process described below.
-*My project wants to graduate from incubator, how can it do that?*
+**My project wants to graduate from incubator, how can it do that?**
Either approval from a SIG to graduate to a SIG repository, or approval from SIG-Architecture to graduate into the core repository.
-
-
-*My incubator project wants to go GA, how can it do that?*
+**My incubator project wants to go GA, how can it do that?**
For now, the project determines if and when it is GA. For the future, we may define a cross Kubernetes notion of GA for core and sig repositories, but that’s not in this proposal.
-
-
-*My project is currently in core, but doesn’t seem to fit these guidelines, what’s going to happen?*
+**My project is currently in core, but doesn’t seem to fit these guidelines, what’s going to happen?**
For now, nothing. Eventually, we may redistribute projects, but for now the goal is to adapt the process going forward, not re-legislate past decisions.
-
-
-*I’m starting a new project, what should I do?*
+**I’m starting a new project, what should I do?**
Is this a SIG-sponsored project? If so, convince some SIG to host it, take it to the SIG mailing list, meeting and get consensus, then the SIG can create a repo for you in the SIG organization.
-
-
Is this a small-group or personal project? If so, create a repository wherever you’d like, and make it an associated project.
-
-
We suggest starting with the kubernetes-template-project to ensure you have the correct code of conduct, license, etc.
-
-
-*Much of the things needed (e.g. CLA Bot integration) is missing to support associated projects. Many things seem vague. Help!*
+**Much of the things needed (e.g. CLA Bot integration) is missing to support associated projects. Many things seem vague. Help!**
True, we need to improve these things. For now, do the best you can to conform to the spirit of the proposal (e.g. post the code of conduct, etc)
+
+[GitHub's archive feature]: https://help.github.com/articles/archiving-a-github-repository/
+[kubernetes-retired]: https://github.com/kubernetes-retired
diff --git a/github-management/opening-a-request.md b/github-management/opening-a-request.md
new file mode 100644
index 00000000..b0088647
--- /dev/null
+++ b/github-management/opening-a-request.md
@@ -0,0 +1,33 @@
+# Opening an issue for support with GitHub
+
+## GitHub issues
+
+If you need help with the following:
+- Permissions issues
+- Organization membership
+- Third-party integrations
+- Webhooks
+- Repository creation/migration
+- Repository archival
+- Other repository and configuration issues
+
+Please open an issue against the [kubernetes/org] repository describing your
+issue. If your request is urgent, please escalate to **[@kubernetes/owners]**.
+
+## Bot/Automation issues
+
+If you need help with the following:
+- Bot configuration
+- Automatic merging
+- Issue labelling
+- Automation feedback and feature requests
+
+Please open an issue against the [kubernetes/test-infra] repository describing
+your issue. If your request is urgent, please escalate to the
+[test-infra on-call] or reach out to `#testing-ops` on Slack.
+
+
+[kubernetes/org]: https://github.com/kubernetes/org/issues
+[@kubernetes/owners]: https://github.com/orgs/kubernetes/teams/owners
+[kubernetes/test-infra]: https://github.com/kubernetes/test-infra/issues
+[test-infra on-call]: https://go.k8s.io/oncall
diff --git a/github-management/org-owners-guide.md b/github-management/org-owners-guide.md
index ad7c8a7e..0ac510d9 100644
--- a/github-management/org-owners-guide.md
+++ b/github-management/org-owners-guide.md
@@ -4,6 +4,26 @@ The Kubernetes project leverages multiple GitHub organizations to store and
organize code. This guide contains the details on how to run those organizations
for CNCF compliance and for the guidelines of the community.
+## SLOs
+
+The [GitHub Administration Team] will aim to handle requests in the following
+time frames:
+- Organization invites should be handled within 72 hours of all requirements for
+ membership being met (all +1s obtained).
+- Repository creation or migration requests should be responded to within 72
+ hours of the issue being opened. There may be information required or specific
+ requirements that take additional time, but once all requirements are met, the
+ repo should be created within 72 hours.
+- Security or moderation requests should be handled ASAP, and coverage should be
+ provided in multiple time zones and countries.
+- All other requests should be responded to within 72 hours of the issue being
+ opened. The time to resolve these requests will vary depending on the
+ specifics of the request.
+
+If a request is taking longer than the above time frames, or there is a need to
+escalate an urgent request, please mention **[@kubernetes/owners]** on the
+associated issue for assistance.
+
## Organization Naming
Kubernetes managed organizations should be in the form of `kubernetes-[thing]`.
@@ -60,3 +80,7 @@ for all orgs going forward. Notable discrepancies at the moment:
Repositories have additional guidelines and requirements, such as the use of
CLA checking on all contributions. For more details on those please see the
[Kubernetes Template Project](https://github.com/kubernetes/kubernetes-template-project), and the [Repository Guidelines](kubernetes-repositories.md)
+
+
+[GitHub Administration Team]: /github-management/README.md#github-administration-team
+[@kubernetes/owners]: https://github.com/orgs/kubernetes/teams/owners
diff --git a/github-management/permissions.md b/github-management/permissions.md
index 3ae03c70..ba036fee 100644
--- a/github-management/permissions.md
+++ b/github-management/permissions.md
@@ -36,7 +36,8 @@ There are certain actions that require org owner access:
- Transfer repositories
- Approve GitHub application integrations
-**// TODO(cblecker):** Define specific roles that need this.
+In the Kubernetes project, this role is held by the
+[GitHub Administration Team].
### Member
@@ -93,6 +94,7 @@ member in the organization.
[bot commands]: https://go.k8s.io/bot-commands
[community membership]: /community-membership.md
+[GitHub Administration Team]: /github-management/README.md#github-administration-team
[org permissions]:
https://help.github.com/articles/permission-levels-for-an-organization/
[OWNERS]: /contributors/guide/owners.md
diff --git a/keps/NEXT_KEP_NUMBER b/keps/NEXT_KEP_NUMBER
index 3c032078..a45fd52c 100644
--- a/keps/NEXT_KEP_NUMBER
+++ b/keps/NEXT_KEP_NUMBER
@@ -1 +1 @@
-18
+24
diff --git a/keps/sig-auth/0014-dynamic-audit-configuration.md b/keps/sig-auth/0014-dynamic-audit-configuration.md
index de2962b1..3863c058 100644
--- a/keps/sig-auth/0014-dynamic-audit-configuration.md
+++ b/keps/sig-auth/0014-dynamic-audit-configuration.md
@@ -17,8 +17,8 @@ approvers:
- "@yliaog"
editor: TBD
creation-date: 2018-05-18
-last-updated: 2018-07-13
-status: provisional
+last-updated: 2018-07-31
+status: implementable
---
# Dynamic Audit Control
@@ -38,29 +38,41 @@ status: provisional
* [Story 1](#story-1)
* [Story 2](#story-2)
* [Story 3](#story-3)
+ * [Story 4](#story-4)
* [Implementation Details/Notes/Constraints](#implementation-detailsnotesconstraints)
+ * [Feature Gating](#feature-gating)
+ * [Policy Enforcement](#policy-enforcement)
+ * [Aggregated Servers](#aggregated-servers)
* [Risks and Mitigations](#risks-and-mitigations)
* [Privilege Escalation](#privilege-escalation)
+ * [Leaked Resources](#leaked-resources)
* [Webhook Authentication](#webhook-authentication)
+ * [Performance](#performance)
* [Graduation Criteria](#graduation-criteria)
* [Implementation History](#implementation-history)
* [Alternatives](#alternatives)
* [Generalized Dynamic Configuration](#generalized-dynamic-configuration)
+ * [Policy Override](#policy-override)
## Summary
-We want to allow the advanced auditing features to be dynamically configured. Following in the same vein as [Dynamic Admission Control](https://kubernetes.io/docs/admin/extensible-admission-controllers/) we would like to provide a means of configuring the auditing features post cluster provisioning.
+We want to allow the advanced auditing features to be dynamically configured. Following in the same vein as
+[Dynamic Admission Control](https://kubernetes.io/docs/admin/extensible-admission-controllers/) we would like to provide
+a means of configuring the auditing features post cluster provisioning.
## Motivation
-The advanced auditing features are a powerful tool, yet difficult to configure. The configuration requires deep insight into the deployment mechanism of choice and often takes many iterations to configure properly requiring a restart of the apiserver each time. Moreover, the ability to install addon tools that configure and enhance audting is hindered by the overhead in configuration. Such tools frequently run on the cluster requiring future knowledge of how to reach them when the cluster is live. These tools could enhance the security and conformance of the cluster and its applications.
+The advanced auditing features are a powerful tool, yet difficult to configure. The configuration requires deep insight
+into the deployment mechanism of choice and often takes many iterations to configure properly requiring a restart of
+the apiserver each time. Moreover, the ability to install addon tools that configure and enhance auditing is hindered
+by the overhead in configuration. Such tools frequently run on the cluster requiring future knowledge of how to reach
+them when the cluster is live. These tools could enhance the security and conformance of the cluster and its applications.
### Goals
- Provide an api and set of objects to configure the advanced auditing kube-apiserver configuration dynamically
### Non-Goals
- Provide a generic interface to configure all kube-apiserver flags
-- composable audit policies per-endpoint
- configuring non-webhook backends
- configuring audit output (format or per-field filtering)
- authorization of audit output
@@ -74,13 +86,18 @@ A new dynamic audit backend will be introduced that follows suit with the existi
A cluster scoped configuration object will be provided that applies to all events in the cluster.
```golang
-// ClusterAuditConfiguration represents a cluster level audit configuration
-type ClusterAuditConfiguration struct {
+// AuditConfiguration represents a dynamic audit configuration
+type AuditConfiguration struct {
metav1.TypeMeta
v1.ObjectMeta
- // Backends to send events
+ // Policy is the current audit v1beta1 Policy object
+ // if undefined it will default to the statically configured cluster policy if available
+ // if neither exist the backend will fail
+ Policy *Policy
+
+ // Backend to send events
Backend *Backend
}
@@ -127,12 +144,17 @@ type WebhookClientConfig struct {
Multiple definitions can exist as independent solutions. These updates will require the audit API to be registered with the apiserver. The dynamic configurations will be wrapped by truncate and batch options, which are set statically through existing flags. Dynamic configuration will be enabled by a feature gate for pre-stable releases. If existing flags are provided to configure the audit backend they will be taken as a separate backend configuration.
-Example cluster yaml config:
+Example configuration yaml config:
```yaml
apiVersion: audit.k8s.io/v1beta1
-kind: ClusterAuditConfiguration
+kind: AuditConfiguration
metadata:
name: <name>
+policy:
+ rules:
+ - level: <level>
+ omitStages:
+ - stage: <stage>
backend:
webhook:
- initialBackoff: <10s>
@@ -144,11 +166,12 @@ backend:
service: <optional service name>
caBundle: <ca bundle>
```
+A configuration flag will be added that enables dynamic auditing `--audit-dynamic-configuration`, which will default to false.
### User Stories
#### Story 1
-As a cluster admin, I will easily be able to enable the interal auditing features of an existing cluster, and tweak the configurations as necessary. I want to prevent privilege escalation from being able to tamper with a root audit configuration.
+As a cluster admin, I will easily be able to enable the internal auditing features of an existing cluster, and tweak the configurations as necessary. I want to prevent privilege escalation from being able to tamper with a root audit configuration.
#### Story 2
As a Kubernetes extension developer, I will be able to provide drop in extensions that utilize audit data.
@@ -156,11 +179,39 @@ As a Kubernetes extension developer, I will be able to provide drop in extension
#### Story 3
As a cluster admin, I will be able configure multiple audit-policies and webhook endpoints to provide independent auditing facilities.
-### Implementation Details/Notes/Constraints
+#### Story 4
+As a kubernetes developer, I will be able to quickly turn up the audit level on a certain area to debug my application.
-Any actions to the audit configuration objects will be hard coded to log at the `level=RequestResponse` to the previous backend and the new backend. If the apiserver is HA, the configuration will be rolled out in increments.
+### Implementation Details/Notes/Constraints
-Inherently apiserver aggregates and HA apiserver setups will work off the same dynamic configuration object. If separate objects are needed they should be configured as static objects on the node and set through the runtime flags. Aggregated servers will implement the same audit handling mechanisms. A conformance test should be provided as assurance. This needs further discussion with the participating sigs.
+#### Feature Gating
+Introduction of dynamic policy requires changes to the current audit pipeline. Care must be taken that these changes are
+properly gated and do not affect the stability or performance of the current features as they progress to GA. A new decorated
+handler will be provisioned similar to the [existing handlers](https://github.com/kubernetes/apiserver/blob/master/pkg/endpoints/filters/audit.go#L41)
+called `withDynamicAudit`. Another conditional clause will be added where the handlers are
+[provisioned](https://github.com/kubernetes/apiserver/blob/master/pkg/server/config.go#L536) allowing for the proper feature gating.
+
+#### Policy Enforcement
+This addition will move policy enforcement from the main handler to the backends. From the `withDynamicAudit` handler,
+the full event will be generated and then passed to the backends. Each backend will copy the event and then be required to
+drop any pieces that do not conform to its policy. A new sink interface will be required for these changes called `EnforcedSink`,
+this will largely follow suit with the existing sink but take a fully formed event and the authorizer attributes as its
+parameters. It will then utilize the `LevelAndStages` method in the policy
+[checker](https://github.com/kubernetes/apiserver/blob/master/pkg/audit/policy/checker.go) to enforce its policy on the event,
+and drop any unneeded sections. The new dynamic backend will implement the `EnforcedSink` interface, and update its state
+based on a shared informer. For the existing backends to comply, a method will be added that implements the `EnforcedSink` interface.
+
+Implementing the [attribute interface](https://github.com/kubernetes/apiserver/blob/master/pkg/authorization/authorizer/interfaces.go)
+based on the Event struct was also explored. This would allow us to keep the existing `Sink` interfaces, however it would
+require parsing the request URI twice in the pipeline due to how that field is represented in the Event. This was determined
+to not be worth the cost.
+
+#### Aggregated Servers
+Inherently apiserver aggregates and HA apiserver setups will work off the same dynamic configuration object. If separate
+audit configuration objects are needed they should be configured as static objects on the node and set through the runtime flags. Aggregated servers will implement the same audit handling mechanisms. A conformance test should be provided as assurance. Metadata level
+logging will happen by default at the main api server as it proxies the traffic. The aggregated server will then watch the same
+configuration objects and only log on resource types that it handles. This will duplicate the events sent to the receiving servers
+so they should not expect to key off `{ Audit-ID x Stage }`.
### Risks and Mitigations
@@ -173,6 +224,11 @@ This does open up the attack surface of the audit mechanisms. Having them strict
As a mitigation strategy policy configured through a static file on the api server will not be accessible through the api. This file ensures that an escalation attack cannot tamper with a root configuration, but works independently of any dynamically configured objects.
+#### Leaked Resources
+A user with permissions to create audit policies effectively has read access to the entire cluster (including all secrets data).
+
+A mitigation strategy will be to document the exposure space granted with this resource. Advice will be provided to only allow access to cluster admin level roles.
+
#### Webhook Authentication
With Dynamic Admission control today any authentication mechanism must be provided through a static kubeconfig file on the node. This hinders a lot of the advances in this proposal. All webhooks would require authentication as an unauthenticated endpoint would allow a bad actor to push phony events. Lack of dynamic credential provisioning is problematic to the drop-in extension use case, and difficult to configure.
@@ -182,6 +238,14 @@ It may also be reasonable to provide a dynamic auth configuration from secrets,
This needs further discussion.
+#### Performance
+
+These changes will likely have an O(n) performance impact on the api server per policy. A `DeepCopy` of the event will be
+required for each backend. Also, the request/response object would now be serialized on every [request](https://github.com/kubernetes/kubernetes/blob/cef2d325ee1be894e883d63013f75cfac5cb1246/staging/src/k8s.io/apiserver/pkg/audit/request.go#L150-L152).
+Benchmark testing will be required to understand the scope of the impact and what optimizations may be required. This impact
+is gated by opt-in feature flags, which allows it to move to alpha but these concerns must be tested and reconciled before it
+progresses to beta.
+
## Graduation Criteria
Success will be determined by stability of the provided mechanisms and ease of understanding for the end user.
@@ -193,9 +257,24 @@ Success will be determined by stability of the provided mechanisms and ease of u
- 05/18/2018: initial design
- 06/13/2018: updated design
+- 07/31/2018: dynamic policy addition
## Alternatives
### Generalized Dynamic Configuration
-We could strive for all kube-apiserver flags to be able to be dynamically provisioned in a common way. This is likely a large task and out of the scope of the intentions of this feature.
+We could strive for all kube-apiserver flags to be able to be dynamically provisioned in a common way. This is likely a large
+task and out of the scope of the intentions of this feature.
+
+### Policy Override
+
+There has been discussion over whether the policy configured by api server flags should limit the policies configured dynamically.
+This would allow a cluster admin to narrowly define what is allowed to be logged by the dynamic configurations. While this has upsides
+it was ruled out for the following reasons:
+
+* It would limit user story #4 in the ability to quickly turn up logging when needed
+* It could prove difficult to understand as the policies themselves are fairly complex
+* The use of CRDs would be difficult to bound
+
+The dynamic policy feature is gated by runtime flags. This still provides the cluster provisioner a means to limit audit logging to the
+single runtime object if needed. \ No newline at end of file
diff --git a/sig-azure/0018-20180711-azure-availability-zones.md b/keps/sig-azure/0018-20180711-azure-availability-zones.md
index 99bc76ca..99bc76ca 100644
--- a/sig-azure/0018-20180711-azure-availability-zones.md
+++ b/keps/sig-azure/0018-20180711-azure-availability-zones.md
diff --git a/keps/sig-cloud-provider/0003-testgrid-conformance-e2e.md b/keps/sig-cloud-provider/0018-testgrid-conformance-e2e.md
index e7489ff8..a5b4becb 100644
--- a/keps/sig-cloud-provider/0003-testgrid-conformance-e2e.md
+++ b/keps/sig-cloud-provider/0018-testgrid-conformance-e2e.md
@@ -1,5 +1,5 @@
---
-kep-number: 003
+kep-number: 0018
title: Reporting Conformance Test Results to Testgrid
authors:
- "@andrewsykim"
diff --git a/keps/sig-cloud-provider/0019-cloud-provider-documentation.md b/keps/sig-cloud-provider/0019-cloud-provider-documentation.md
new file mode 100644
index 00000000..0f7bb536
--- /dev/null
+++ b/keps/sig-cloud-provider/0019-cloud-provider-documentation.md
@@ -0,0 +1,145 @@
+---
+kep-number: 0019
+title: Cloud Provider Documentation
+authors:
+ - "@d-nishi"
+ - "@hogepodge"
+owning-sig: sig-cloud-provider
+participating-sigs:
+ - sig-docs
+ - sig-cluster-lifecycle
+ - sig-aws
+ - sig-azure
+ - sig-gcp
+ - sig-openstack
+ - sig-vmware
+reviewers:
+ - "@andrewsykim"
+ - "@calebamiles"
+ - "@hogepodge"
+ - "@jagosan"
+approvers:
+ - "@andrewsykim"
+ - "@hogepodge"
+ - "@jagosan"
+editor: TBD
+status: provisional
+---
+## Transfer the responsibility of maintaining valid documentation for Cloud Provider Code to the Cloud Provider
+
+### Table of Contents
+
+* [Summary](#summary)
+* [Motivation](#motivation)
+ * [Goals](#goals)
+ * [Non-Goals](#non-goals)
+* [Proposal](#proposal)
+* [User Stories [optional]](#user-stories)
+ * [Story 1](#story-1)
+ * [Story 2](#story-2)
+* [Implementation Details/Notes/Constraints [optional]](#implementation-detailsnotesconstraints)
+* [Risks and Mitigations](#risks-and-mitigations)
+* [Graduation Criteria](#graduation-criteria)
+* [Implementation History](#implementation-history)
+* [Alternatives [optional]](#alternatives)
+
+### Summary
+This KEP describes the documentation requirements for both in-tree and out-of-tree cloud controller managers.
+These requirements are meant to capture critical usage documentation that is common between providers, set requirements for individual documentation, and create consistent standards across provider documentation. The scope of this document is limited to in-tree code that interfaces with kube-controller-manager, and out-of-tree code that interfaces with cloud-controller-manager
+
+### Motivation
+Currently documentation for cloud providers for both in-tree and out-of-tree managers is limited in scope, consistency, and quality. This KEP describes requirements, to be reached in the 1.12 release cycle, to create and maintain consistent documentation across all cloud provider manager code. By establishing these standards, SIG-Cloud-Provider will benefit the user-community by offering a single discoverable source of reliable documentation while relieving the SIG-Docs team from the burden of maintaining outdated, duplicated documentation.
+
+#### Goals
+* Produce a common document that describes how to configure any in-tree cloud provider that can be reused by tools such as kubeadm, to create minimum viable Kubernetes clusters.
+ * Create documentation requirements on how to configure in-tree cloud providers.
+ * Produce documentation for every in-tree cloud provider.
+* Provide a common document that describes how to configure any out-of-tree cloud-controller-manager by provider.
+ * Create documentation requirements on how to configure out-of-tree cloud providers.
+ * Produce documentation for every out-of-tree cloud provider.
+* Maintain developer documentation for anyone wanting to build a new cloud-controller-manager.
+* Generate confidence in SIG-docs to confidently link to SIG-Cloud-Provider documentation for all future releases.
+
+#### Non-Goals
+This KEP is limited to documenting requirements for control plane components for in-tree implementation and cloud-controller-manager for out-of-tree implementation. It is not currently meant to document provider-specific drivers or code (example: Identity & access management: Keystone for Openstack, IAM for AWS etc).
+SIG-Docs is not expected to produce or maintain any of this documentation.
+
+### Proposal
+
+#### Goal 1:
+Produce a common document that describes how to configure any in-tree cloud provider that can be reused by tools such as kubeadm, to create minimum viable Kubernetes clusters.
+
+Kubernetes documentation lists details of current cloud-provider [here](https://kubernetes.io/docs/concepts/cluster-administration/cloud-providers/). Additional documentation [(1),](https://kubernetes.io/docs/concepts/services-networking/service/) [(2)](https://kubernetes.io/docs/tasks/administer-cluster/developing-cloud-controller-manager/) that link to cloud-provider code currently remains detached and poorly maintained.
+
+#### Requirement 1:
+Provide validated manifests for kube-controller-manager, kubelet and kube-apiserver by cloud-provider to enable a Kubernetes administrator to run cloud-provider=<providername> in-tree with kube-controller-manager as is feasible today. This is only relevant to environments where a Kubernetes administrator/user has (or) wants access to the control plane. Environments such as Amazon EKS, Google’s GKE, Azure’s AKS and other such platforms are out of context here.
+
+* Add --cloud-provider=<providername> to the kube-apiserver, kube-controller-manager and every kubelet. These manifests should be regularly updated and listed at this location: https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/aws/docs/
+ * Example of an [apiserver manifest](https://gist.github.com/d-nishi/1109fec153930e8de04a1bf160cacffb)
+ * Example of [kube-controller-manager](https://gist.github.com/d-nishi/a41691cdf50239986d1e725af4d20033)
+ * Example of a [systemd service for kubelet](https://gist.github.com/d-nishi/289cb82367580eb0cb129c9f967d903d) and [kubelet config](https://gist.github.com/d-nishi/d7f9a1b59c0441d476646dc7cce7e811)
+* Run in-tree cloud-controller-manager as a [daemon-set](https://gist.github.com/d-nishi/38e3b7051029b5d1a1772f3862f62ce9)/deployment/replicaset/static pod on the cluster.
+
+#### Requirement 2:
+Provide validated/tested descriptions with examples of controller features (annotations or tags) that are cloud-provider dependent that can be reused by any Kubernetes administrator to run `cloud-provider-<providername>` in-tree with `kube-controller-manager` as is described in the code <cloudprovider.go> Example: aws.go
+These manifests should be regularly tested and updated post testing in the relevant provider location E.g.: https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/aws/docs/
+* Node Controller (or) NodeName
+* Service Controller (or) LoadBalancer
+* Volume Controller (or) persistent volume labels controller
+* Other provider-specific-Controller e.g. Route controller for GCP.
+
+#### Goal 2:
+Provide a common document that describes how to configure any out-of-tree cloud-controller-manager by provider.
+
+#### Requirement 1:
+Provide validated manifests for kube-controller-manager, kubelet and kube-apiserver by cloud-provider to enable a Kubernetes administrator to run out-of-tree cloud-controller-manager.
+* Remove --cloud-provider flag from kube-apiserver and kube-controller-manager. Remove this flag from the manifest when the flag is deprecated in a future release. Run kubelet with --cloud-provider=external.
+* Run out-of-tree cloud-controller-manager as a daemon-set/deployment/replicaset/static pod on the cluster.
+
+#### Requirement 2:
+List out the latest annotations or tags that are cloud-provider dependent and will be used by the Kubernetes administrator to run `cloud-provider-<providername>` in-tree with `kube-controller-manager` as is described in the code <cloudprovider.go> Eg. aws.go
+These manifests should be regularly tested and updated in the relevant provider location E.g.: https://github.com/kubernetes/kubernetes/tree/master/pkg/cloudprovider/providers/aws/docs/
+* Node Controller (or) NodeName
+* Service Controller (or) LoadBalancer
+* Volume Controller (or) persistent volume labels controller
+* Other provider-specific-Controller e.g. Route controller for GCP
+
+### User Stories [optional]
+
+#### Story 1
+Sally is a devops engineer who wants to run Kubernetes clusters across her on-premise environment and public cloud sites. She wants to use ansible or terraform to bring up Kubernetes v1.11. She references the cloud-provider documentation to understand how to enable in-tree provider code, and has a consistent set of documentation to help her write automation to target each individual cloud.
+
+#### Story 2
+Sam wants to add advanced features to an external cloud provider. By consulting the external cloud provider documents, they are able to set up a development and test environment. Where previously documentation was inconsistent and spread across multiple sources, there is a single document that allows them to immediately launch provider code within their target cloud.
+
+### Implementation Details/Notes/Constraints [optional]
+The requirements set forward need to accomplish several things:
+* Identify and abstract common documentation across all providers.
+* Create a consistent format that makes it easy to switch between providers.
+* Allow for provider-specific documentation, quirks, and features.
+
+### Risks and Mitigations
+This proposal relies heavily on individual cloud-provider developers to provide expertise in document generation and maintenance. Documentation can easily drift from implementation, making for a negative user experience.
+To mitigate this, SIG-Cloud-Provider membership will work with developers to keep their documentation up to date. This will include a review of documents along release-cycle boundaries, and adherence to release-cycle deadlines.
+SIG-Cloud-Provider will work with SIG-Docs to establish quality standards and with SIG-Node and SIG Cluster Lifecycle to keep common technical documentation up-to-date.
+
+### Graduation Criteria
+This KEP represents an ongoing effort for the SIG-Cloud-Provider team.
+* Immediate success is measured by the delivery of all goals outlined in the Goals (1) section.
+* Long Term success is measured by the delivery of goals outlined in the Goals (2) section.
+* Long Term success is also measured by the regular upkeep of all goals in Goals (1) and (2) sections.
+
+### Implementation History
+Major milestones in the life cycle of a KEP should be tracked in Implementation History. Major milestones might include:
+* the Summary and Motivation sections being merged signaling SIG acceptance
+* the Proposal section being merged signaling agreement on a proposed design
+* the date implementation started - July 25 2018
+* the first Kubernetes release where an initial version of the KEP was available - v1.12
+* the version of Kubernetes where the KEP graduated to general availability - v1.14
+* the date when the KEP was retired or superseded - NA
+
+### Alternatives [optional]
+The Alternatives section is used to highlight and record other possible approaches to delivering the value proposed by a KEP.
+* SIG docs could tag cloudprovider documentation as a blocking item for Kubernetes releases
+* SIG docs could also assign SIG-<provider> leads to unblock cloudprovider documentation in the planning phase for the release.
+
diff --git a/keps/sig-cloud-provider/providers/0004-cloud-provider-template.md b/keps/sig-cloud-provider/providers/0004-cloud-provider-template.md
index 2a214514..b76cfba5 100644
--- a/keps/sig-cloud-provider/providers/0004-cloud-provider-template.md
+++ b/keps/sig-cloud-provider/providers/0004-cloud-provider-template.md
@@ -67,6 +67,21 @@ For [repository requirements](https://github.com/kubernetes/community/blob/maste
There must be a reasonable amount of user feedback about running Kubernetes for this cloud provider. You may want to link to sources that indicate this such as github issues, product data, customer tesitimonials, etc.
+### Testgrid Integration
+
+Your cloud provider is reporting conformance test results to TestGrid as per the [Reporting Conformance Test Results to Testgrid KEP](https://github.com/kubernetes/community/blob/master/keps/sig-cloud-provider/0003-testgrid-conformance-e2e.md).
+
+### CNCF Certified Kubernetes
+
+Your cloud provider is accepted as part of the [Certified Kubernetes Conformance Program](https://github.com/cncf/k8s-conformance).
+
+### Documentation
+
+There is documentation on running Kubernetes on your cloud provider as per the [cloud provider documentation KEP](https://github.com/kubernetes/community/blob/master/keps/sig-cloud-provider/0019-cloud-provider-documentation.md).
+
+### Technical Leads are members of the Kubernetes Organization
+
+All proposed technical leads for this provider must be members of the Kubernetes organization. Membership is used as a signal for technical ability, commitment to the project, and compliance to the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) which we believe are important traits for subproject technical leads. Learn more about Kubernetes community membership [here](https://github.com/kubernetes/community/blob/master/community-membership.md).
## Proposal
diff --git a/keps/sig-cloud-provider/providers/0020-cloud-provider-alibaba-cloud.md b/keps/sig-cloud-provider/providers/0020-cloud-provider-alibaba-cloud.md
new file mode 100644
index 00000000..13a5207d
--- /dev/null
+++ b/keps/sig-cloud-provider/providers/0020-cloud-provider-alibaba-cloud.md
@@ -0,0 +1,120 @@
+---
+kep-number: 20
+title: Cloud Provider for Alibaba Cloud
+authors:
+ - "@aoxn"
+owning-sig: sig-cloud-provider
+reviewers:
+ - "@andrewsykim"
+approvers:
+ - "@andrewsykim"
+ - "@hogepodge"
+ - "@jagosan"
+editor: TBD
+creation-date: 2018-06-20
+last-updated: 2018-06-20
+status: provisional
+
+---
+
+# Cloud Provider for Alibaba Cloud
+
+This is a KEP for adding ```Cloud Provider for Alibaba Cloud``` into the Kubernetes ecosystem.
+
+## Table of Contents
+
+* [Table of Contents](#table-of-contents)
+* [Summary](#summary)
+* [Motivation](#motivation)
+ * [Goals](#goals)
+ * [Non-Goals](#non-goals)
+* [Requirements](#requirements)
+* [Proposal](#proposal)
+
+## Summary
+
+Alibaba Cloud provides the Cloud Provider interface implementation as an out-of-tree cloud-controller-manager. It allows Kubernetes clusters to leverage the infrastructure services of Alibaba Cloud.
+Its original open source project is [https://github.com/AliyunContainerService/alicloud-controller-manager](https://github.com/AliyunContainerService/alicloud-controller-manager)
+
+## Motivation
+
+### Goals
+
+Cloud Provider of Alibaba Cloud implements interoperability between Kubernetes clusters and Alibaba Cloud. In this project, we will be dedicated to:
+- Provide reliable, secure and optimized integration with Alibaba Cloud for Kubernetes
+
+- Help on the improvement for decoupling cloud provider specifics from Kubernetes implementation.
+
+
+
+### Non-Goals
+
+The networking and storage support of Alibaba Cloud for Kubernetes will be provided by other projects.
+
+E.g.
+
+* [Flannel network for Alibaba Cloud VPC](https://github.com/coreos/flannel)
+* [FlexVolume for Alibaba Cloud](https://github.com/AliyunContainerService/flexvolume)
+
+
+## Prerequisites
+
+1. The VPC network is supported in this project. The support for classic network or none ECS environment will be out-of-scope.
+2. When using the instance profile for authentication, an instance role is required to attach to the ECS instance firstly.
+3. Kubernetes version v1.7 or higher
+
+### Repository Requirements
+
+[Alibaba Cloud Controller Manager](https://github.com/AliyunContainerService/alicloud-controller-manager) is a working implementation of the [Kubernetes Cloud Controller Manager](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/).
+
+The repo requirements are mainly a copy of the [cloudprovider KEP](https://github.com/kubernetes/community/blob/master/keps/sig-cloud-provider/0002-cloud-controller-manager.md#repository-requirements). Open the link for more detail.
+
+### User Experience Reports
+As a CNCF Platinum member, Alibaba Cloud is dedicated to providing users with highly secure, stable and efficient cloud services.
+Usage of aliyun container services can be seen from github issues in the existing alicloud controller manager repo: https://github.com/AliyunContainerService/alicloud-controller-manager/issues
+
+### Testgrid Integration
+
+TODO
+
+### CNCF Certified Kubernetes
+
+TODO
+
+### Documentation
+
+TODO
+
+### Technical Leads are members of the Kubernetes Organization
+
+TODO
+
+## Proposal
+
+Here we propose a repository from Kubernetes organization to host our cloud provider implementation. Cloud Provider of Alibaba Cloud would be a subproject under Kubernetes community.
+
+### Subproject Leads
+
+The Leads run operations and processes governing this subproject.
+Leaders:
+- Mark (@denverdino), Alibaba Cloud, Director Engineer
+- Zhimin Tang (@ddbmh), Alibaba Cloud
+- Aoxn (@aoxn), Alibaba Cloud
+
+### Repositories
+
+Cloud Provider of Alibaba Cloud will need a repository under Kubernetes org named ```kubernetes/cloud-provider-alibaba-cloud``` to host any cloud specific code.
+The initial owners will be indicated in the initial OWNER files.
+
+Additionally, SIG Cloud Provider takes ownership of the repo, but Alibaba Cloud should have full autonomy to operate this subproject.
+
+### Meetings
+
+Cloud Provider meetings are expected to be held biweekly. SIG Cloud Provider will provide Zoom/YouTube channels as required. We will have our first meeting after the repo has been settled.
+
+Recommended Meeting Time: Wednesdays at 20:00 PT (Pacific Time) (biweekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=20:00&tz=PT%20%28Pacific%20Time%29).
+- Meeting notes and Agenda.
+- Meeting recordings.
+
+
+### Others
diff --git a/keps/sig-cloud-provider/providers/0017-cloud-provider-digitalocean.md b/keps/sig-cloud-provider/providers/0021-cloud-provider-digitalocean.md
index 60e1aad0..c9254fee 100644
--- a/keps/sig-cloud-provider/providers/0017-cloud-provider-digitalocean.md
+++ b/keps/sig-cloud-provider/providers/0021-cloud-provider-digitalocean.md
@@ -1,5 +1,5 @@
---
-kep-number: 17
+kep-number: 21
title: Cloud Provider DigitalOcean
authors:
- "@andrewsykim"
@@ -56,6 +56,22 @@ The existing repository hosting the [DigitalOcean cloud controller manager](http
DigitalOcean recently announced a [Kubernetes offering](https://www.digitalocean.com/products/kubernetes/). Many users have already signed up for early access. DigitalOcean is also a gold member of the CNCF.
+### Testgrid Integration
+
+TODO
+
+### CNCF Certified Kubernetes
+
+TODO
+
+### Documentation
+
+TODO
+
+### Technical Leads are members of the Kubernetes Organization
+
+TODO
+
## Proposal
### Subproject Leads
diff --git a/keps/sig-cloud-provider/providers/cloud-provider-baiducloud.md b/keps/sig-cloud-provider/providers/0022-cloud-provider-baiducloud.md
index d381cb55..2ce5f974 100644
--- a/keps/sig-cloud-provider/providers/cloud-provider-baiducloud.md
+++ b/keps/sig-cloud-provider/providers/0022-cloud-provider-baiducloud.md
@@ -1,3 +1,21 @@
+---
+kep-number: 22
+title: Cloud Provider BaiduCloud
+authors:
+ - "@tizhou86"
+owning-sig: sig-cloud-provider
+reviewers:
+ - "@andrewsykim"
+approvers:
+ - "@andrewsykim"
+ - "@hogepodge"
+ - "@jagosan"
+editor: TBD
+creation-date: 2018-07-23
+last-updated: 2018-07-23
+status: provisional
+
+---
# Cloud Provider BaiduCloud
## Table of Contents
@@ -20,7 +38,7 @@ Baidu is a gold member of CNCF and we have a large team working on Kubernetes an
- Building, deploying, maintaining, supporting, and using Kubernetes on Baidu Cloud Container Engine(CCE) and Baidu Private Cloud(BPC). Both of the project are built on Kubernetes and related CNCF project.
-- Designing, discussing, and maintaining the cloud-provider-baidu repository under Github Kubernetes project.
+- Designing, discussing, and maintaining the cloud-provider-baidu repository under Github Kubernetes project.
### Non-Goals
@@ -47,6 +65,21 @@ CCE-ticket-2: User want to modify the image repository's username.
![CCE-ticket-3](http://agroup-bos.su.bcebos.com/7a4506fcb1fbeeb15c86060cfbb6e69d090c8984)
CCE-ticket-3: User want to have multi-tenant ability in a shared large CCE cluster.
+### Testgrid Integration
+
+TODO
+
+### CNCF Certified Kubernetes
+
+TODO
+
+### Documentation
+
+TODO
+
+### Technical Leads are members of the Kubernetes Organization
+
+TODO
## Proposal
diff --git a/keps/sig-cluster-lifecycle/0008-20180504-kubeadm-config-beta.md b/keps/sig-cluster-lifecycle/0008-kubeadm-config-versioning.md
index f2009693..fe3bde8c 100644
--- a/keps/sig-cluster-lifecycle/0008-20180504-kubeadm-config-beta.md
+++ b/keps/sig-cluster-lifecycle/0008-kubeadm-config-versioning.md
@@ -1,6 +1,6 @@
---
kep-number: draft-20180412
-title: Kubeadm Config Draft
+title: Kubeadm Config versioning
authors:
- "@liztio"
owning-sig: sig-cluster-lifecycle
@@ -18,7 +18,7 @@ replaces: []
superseded-by: []
---
-# Kubeadm Config to Beta
+# Kubeadm Config Versioning
## Table of Contents
diff --git a/keps/sig-cluster-lifecycle/0023-kubeadm-config-v1beta1.md b/keps/sig-cluster-lifecycle/0023-kubeadm-config-v1beta1.md
new file mode 100644
index 00000000..e5b988eb
--- /dev/null
+++ b/keps/sig-cluster-lifecycle/0023-kubeadm-config-v1beta1.md
@@ -0,0 +1,244 @@
+---
+kep-number: 23
+title: Kubeadm config file graduation to v1beta1
+authors:
+ - "@fabriziopandini"
+ - "@luxas"
+owning-sig: sig-cluster-lifecycle
+reviewers:
+ - "@chuckha"
+ - "@detiber"
+ - "@liztio"
+ - "@neolit123"
+approvers:
+ - "@luxas"
+ - "@timothysc"
+editor:
+ - "@fabriziopandini"
+creation-date: 2018-08-01
+last-updated:
+see-also:
+ - KEP 0008
+---
+
+# kubeadm Config file graduation to v1beta1
+
+## Table of Contents
+
+<!-- TOC -->
+
+- [kubeadm Config file graduation to v1beta1](#kubeadm-config-file-graduation-to-v1beta1)
+ - [Table of Contents](#table-of-contents)
+ - [Summary](#summary)
+ - [Motivation](#motivation)
+ - [Goals](#goals)
+ - [Non-Goals](#non-goals)
+ - [Proposal](#proposal)
+ - [Decoupling the kubeadm types from other ComponentConfig types](#decoupling-the-kubeadm-types-from-other-componentconfig-types)
+ - [Re-design how kubeadm configurations are persisted](#re-design-how-kubeadm-configurations-are-persisted)
+ - [Use substructures instead of the current "single flat object"](#use-substructures-instead-of-the-current-single-flat-object)
+ - [Risks and Mitigations](#risks-and-mitigations)
+ - [Graduation Criteria](#graduation-criteria)
+ - [Implementation History](#implementation-history)
+ - [Drawbacks](#drawbacks)
+ - [Alternatives](#alternatives)
+
+<!-- /TOC -->
+
+## Summary
+
+This KEP is meant to describe design goal and the proposed solution for implementing the kubeadm
+config file `v1beta1` version.
+
+The kubeadm config file today is one of the first touch points with Kubernetes for many users and
+also for higher level tools leveraging kubeadm; as a consequence, providing a more stable and
+reliable config file format is considered one of the top priorities for graduating kubeadm itself
+to GA and for the future of kubeadm itself.
+
+## Motivation
+
+The kubeadm config file is a set of YAML documents with versioned structs that follow the Kubernetes
+API conventions with regards to `apiVersion` and `kind`, but these types aren’t exposed as an
+API endpoint in the API server. kubeadm follows the ComponentConfig conventions.
+
+The kubeadm config file was originally created as alternative to command line flags for `kubeadm init`
+and `kubeadm join` actions, but over time the number of options supported by the kubeadm config file
+has grown continuously, while the number of command line flags is intentionally kept under control
+and limited to the most common and simplest use cases.
+
+As a consequence today the kubeadm config file is the only viable way for implementing many use cases
+like e.g. usage of an external etcd, customizing Kubernetes control plane components or kube-proxy
+and kubelet parameters.
+
+Additionally, the kubeadm config file today acts also as a persistent representation of the cluster
+specification that can be used at any point in time after `kubeadm init` e.g. for executing
+`kubeadm upgrade` actions.
+
+The `v1beta1` version of kubeadm config file is a required, important consolidation step of the
+current config file format, aimed at rationalizing the considerable number of attributes added in the
+past, providing a more robust and clean integration with the component config API, addressing the
+weaknesses of the current design for representing multi-master clusters, and ultimately laying down
+a more sustainable foundation for the evolution of kubeadm itself.
+
+### Goals
+
+- To provide a solution for decoupling the kubeadm ComponentConfig types from other Kubernetes
+ components’ ComponentConfig types.
+
+- To re-design how kubeadm configurations are persisted, addressing the known limitations/weakness
+ of the current design; more in detail, with the aim to provide a better support for high
+ availability clusters, it should be provided a clear separation between cluster wide settings,
+ control plane instance settings, node/kubelet settings and runtime settings (setting used
+ by the current command but not persisted).
+
+- Improve the current kubeadm config file format by using specialized substructures instead
+ of the current "single flat object with only fields".
+
+### Non-Goals
+
+- To steer/coordinate all the implementation efforts for adoption of ComponentConfig across all
+ the different Kubernetes components.
+
+- To define a new home for the Bootstrap Token Go structs
+
+## Proposal
+
+### Decoupling the kubeadm types from other ComponentConfig types
+
+The `v1alpha2` kubeadm config types currently embeds the ComponentConfig for
+kube-proxy and kubelet into the **MasterConfiguration** object; it is expected that also
+ComponentConfig for kube-controller manager, kube-scheduler and kube-apiserver will be added
+in future (non goal of this KEP).
+
+This strong type of dependency - embeds - already created some problem in the v1.10 cycle, and
+despite some improvements, the current situation is not yet ideal, because e.g embedded dependency
+could impact the kubeadm config file life cycle, forcing kubeadm to change its own config file
+version every time one of the embedded component configurations changes.
+
+`v1beta1` config file version is going to address this problem by removing embedded dependencies
+from the _external_ kubeadm config types.
+
+Instead, the user will be allowed to pass other component’s ComponentConfig in separated YAML
+documents inside of the same YAML file given to `kubeadm init --config`.
+
+> please note that the _kubeadm internal config_ will continue to embed components config
+> for the foreseeable future because kubeadm requires the knowledge of such data structures e.g.
+> for propagating network configuration settings to kubelet, setting defaults, validating
+> or manipulating YAML etc.
+
+### Re-design how kubeadm configurations are persisted
+
+Currently the kubeadm **MasterConfiguration** struct is persisted as a whole into the
+`kubeadm-config` ConfigMap, but this situation has well-known limitations/weaknesses:
+
+- There is no clear distinction between cluster wide settings (e.g. the kube-apiserver server
+ extra-args that should be consistent across all instances) and control plane instance settings
+ (e.g. the API server advertise address of a kube-apiserver instance).
+ NB. This is currently the main blocker for implementing support for high availability clusters
+ in kubeadm.
+
+- There is no clear distinction between cluster wide settings and node/kubelet specific
+ settings (e.g. the node name of the current node)
+
+- There is no clear distinction between cluster wide settings and runtime configurations
+ (e.g. the token that should be created by kubeadm init)
+
+- ComponentConfigs are stored both in the `kubeadm-config` and in the `kubeproxy-config` and
+ `kubelet-config-vX.Y` ConfigMaps, with the first used as authoritative source for updates,
+ while the others are the one effectively used by components.
+
+Considering all the above points, and also the split of the other components ComponentConfigs
+from the kubeadm **MasterConfiguration** type described in the previous paragraph, it should
+be re-designed how kubeadm configuration is persisted.
+
+The proposed solution leverages the new kubeadm capability to handle separated YAML documents
+inside of the same kubeadm-config YAML file. More in detail:
+
+- **MasterConfiguration** will be split into two other top-level kinds: **InitConfiguration**
+ and **ClusterConfiguration**.
+- **InitConfiguration** will host the node-specific options like the node name, kubelet CLI flag
+ overrides locally, and ephemeral, init-only configuration like the Bootstrap Tokens to initialize
+ the cluster with.
+- **ClusterConfiguration** will host the cluster-wide configuration, and **ClusterConfiguration**
+ is the object that will be stored in the `kubeadm-config` ConfigMap.
+- Additionally, **NodeConfiguration** will be renamed to **JoinConfiguration** to be consistent with
+ **InitConfiguration** and highlight the coupling to the `kubeadm join` command and its
+ ephemeral nature.
+
+The new `kubeadm init` flow configuration-wise is summarized by the attached schema.
+
+![kubeadm-init](0023-kubeadm-init.png)
+
+[link](0023-kubeadm-init.png)
+
+As a consequence, also how the kubeadm configuration is consumed by kubeadm commands should
+be adapted as described by following schemas:
+
+- [kubeadm join and kubeadm join --master](0023-kubeadm-join.png)
+- [kubeadm upgrade apply](0023-kubeadm-upgrade-apply.png)
+- [kubeadm upgrade node](0023-kubeadm-upgrade-node.png)
+- [kubeadm reset](0023-kubeadm-reset.png)
+
+### Use substructures instead of the current "single flat object"
+
+Even if with few exceptions, the kubeadm **MasterConfiguration** and **NodeConfiguration** types
+in `v1alpha1` and `v1alpha2` are basically single, flat objects that holds all the configuration
+settings, and this fact e.g. doesn’t allow one to clearly/easily understand which configuration
+options relate to each other or apply to the different control plane components.
+
+While redesigning the config file for addressing the main issues described in previous paragraphs,
+kubeadm will provide also a cleaner representation of attributes belonging to single component/used
+for a specific goal by creating dedicated objects, similarly to what’s already improved for
+etcd configuration in the `v1alpha2` version.
+
+### Risks and Mitigations
+
+This is a change mostly driven by kubeadm maintainers, without an explicit buy-in from customers
+using kubeadm in large installations
+
+The differences from the current config file are relevant and kubeadm users can get confused.
+
+Above risks will be mitigated by:
+
+- providing a fully automated conversion mechanism and a set of utilities under the kubeadm
+ config command (a goal and requirement for this KEP)
+- The new structure could potentially make configuration options less discoverable as they’re
+ buried deeper in the code. Sufficient documentation for common and advanced tasks will help
+ mitigate this.
+- writing a blog post before the release cut
+- providing adequate instructions in the release notes
+
+Impact on the code are considerable.
+
+This risk will be mitigated by implementing the change according to following approach:
+
+- introducing a new `v1alpha3` config file as an intermediate step before `v1beta1`
+- implementing all the new machinery e.g. for managing multi YAML documents in one file, early
+ in the cycle
+- ensuring full test coverage about conversion from `v1alpha2` to `v1alpha3`, early in the cycle
+- postponing the final rename from `v1alpha3` to `v1beta1` only when all the graduation criteria
+ are met, or if this is not the case, iterating the above steps in following release cycles
+
+## Graduation Criteria
+
+The kubeadm API group primarily used in kubeadm is `v1beta1` or higher. There is an upgrade path
+from earlier versions. The primary kinds that can be serialized/deserialized are `InitConfiguration`,
+`JoinConfiguration` and `ClusterConfiguration`. ComponentConfig structs for other Kubernetes
+components are supplied besides `ClusterConfiguration` in different YAML documents.
+SIG Cluster Lifecycle is happy with the structure of the types.
+
+## Implementation History
+
+TBD
+
+## Drawbacks
+
+The differences from the current kubeadm config are relevant and kubeadm users can get confused.
+
+The impacts on the current codebase are considerable; a high commitment from
+the SIG is required. This comes with a real opportunity cost.
+
+## Alternatives
+
+Graduate kubeadm GA with the current kubeadm config and eventually change afterward
+(respecting GA contract rules).
diff --git a/keps/sig-cluster-lifecycle/0023-kubeadm-init.png b/keps/sig-cluster-lifecycle/0023-kubeadm-init.png
new file mode 100644
index 00000000..9f7e97f1
--- /dev/null
+++ b/keps/sig-cluster-lifecycle/0023-kubeadm-init.png
Binary files differ
diff --git a/keps/sig-cluster-lifecycle/0023-kubeadm-join.png b/keps/sig-cluster-lifecycle/0023-kubeadm-join.png
new file mode 100644
index 00000000..3a4b5d43
--- /dev/null
+++ b/keps/sig-cluster-lifecycle/0023-kubeadm-join.png
Binary files differ
diff --git a/keps/sig-cluster-lifecycle/0023-kubeadm-reset.png b/keps/sig-cluster-lifecycle/0023-kubeadm-reset.png
new file mode 100644
index 00000000..cedb3a71
--- /dev/null
+++ b/keps/sig-cluster-lifecycle/0023-kubeadm-reset.png
Binary files differ
diff --git a/keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-apply.png b/keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-apply.png
new file mode 100644
index 00000000..a573f2bb
--- /dev/null
+++ b/keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-apply.png
Binary files differ
diff --git a/keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-node.png b/keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-node.png
new file mode 100644
index 00000000..2d6cbe2a
--- /dev/null
+++ b/keps/sig-cluster-lifecycle/0023-kubeadm-upgrade-node.png
Binary files differ
diff --git a/keps/sig-contributor-experience/0007-20180403-community-forum.md b/keps/sig-contributor-experience/0007-20180403-community-forum.md
index a274f3ee..51aff94f 100644
--- a/keps/sig-contributor-experience/0007-20180403-community-forum.md
+++ b/keps/sig-contributor-experience/0007-20180403-community-forum.md
@@ -16,7 +16,7 @@ approvers:
editor: TBD
creation-date: 2018-04-03
last-updated: 2018-04-17
-status: provisional
+status: implemented
---
# A community forum for Kubernetes
diff --git a/keps/sig-node/0014-runtime-class.md b/keps/sig-node/0014-runtime-class.md
new file mode 100644
index 00000000..cf067b5d
--- /dev/null
+++ b/keps/sig-node/0014-runtime-class.md
@@ -0,0 +1,397 @@
+---
+kep-number: 14
+title: Runtime Class
+authors:
+ - "@tallclair"
+owning-sig: sig-node
+participating-sigs:
+ - sig-architecture
+reviewers:
+ - TBD
+approvers:
+ - TBD
+editor: TBD
+creation-date: 2018-06-19
+status: provisional
+---
+
+# Runtime Class
+
+## Table of Contents
+
+* [Summary](#summary)
+* [Motivation](#motivation)
+ * [Goals](#goals)
+ * [Non\-Goals](#non-goals)
+ * [User Stories](#user-stories)
+* [Proposal](#proposal)
+ * [API](#api)
+ * [Runtime Handler](#runtime-handler)
+ * [Versioning, Updates, and Rollouts](#versioning-updates-and-rollouts)
+ * [Implementation Details](#implementation-details)
+ * [Risks and Mitigations](#risks-and-mitigations)
+* [Graduation Criteria](#graduation-criteria)
+* [Implementation History](#implementation-history)
+* [Appendix](#appendix)
+ * [Examples of runtime variation](#examples-of-runtime-variation)
+
+## Summary
+
+`RuntimeClass` is a new cluster-scoped resource that surfaces container runtime properties to the
+control plane. RuntimeClasses are assigned to pods through a `runtimeClass` field on the
+`PodSpec`. This provides a new mechanism for supporting multiple runtimes in a cluster and/or node.
+
+## Motivation
+
+There is growing interest in using different runtimes within a cluster. [Sandboxes][] are the
+primary motivator for this right now, with both Kata containers and gVisor looking to integrate with
+Kubernetes. Other runtime models such as Windows containers or even remote runtimes will also
+require support in the future. RuntimeClass provides a way to select between different runtimes
+configured in the cluster and surface their properties (both to the cluster & the user).
+
+In addition to selecting the runtime to use, supporting multiple runtimes raises other problems to
+the control plane level, including: accounting for runtime overhead, scheduling to nodes that
+support the runtime, and surfacing which optional features are supported by different
+runtimes. Although these problems are not tackled by this initial proposal, RuntimeClass provides a
+cluster-scoped resource tied to the runtime that can help solve these problems in a future update.
+
+[Sandboxes]: https://docs.google.com/document/d/1QQ5u1RBDLXWvC8K3pscTtTRThsOeBSts_imYEoRyw8A/edit
+
+### Goals
+
+- Provide a mechanism for surfacing container runtime properties to the control plane
+- Support multiple runtimes per-cluster, and provide a mechanism for users to select the desired
+ runtime
+
+### Non-Goals
+
+- RuntimeClass is NOT RuntimeComponentConfig.
+- RuntimeClass is NOT a general policy mechanism.
+- RuntimeClass is NOT "NodeClass". Although different nodes may run different runtimes, in general
+ RuntimeClass should not be a cross product of runtime properties and node properties.
+
+The following goals are out-of-scope for the initial implementation, but may be explored in a future
+iteration:
+
+- Surfacing support for optional features by runtimes, and surfacing errors caused by
+ incompatible features & runtimes earlier.
+- Automatic runtime or feature discovery - initially RuntimeClasses are manually defined (by the
+ cluster admin or provider), and are asserted to be an accurate representation of the runtime.
+- Scheduling in heterogeneous clusters - it is possible to operate a heterogeneous cluster
+ (different runtime configurations on different nodes) through scheduling primitives like
+ `NodeAffinity` and `Taints+Tolerations`, but the user is responsible for setting these up and
+ automatic runtime-aware scheduling is out-of-scope.
+- Define standardized or conformant runtime classes - although I would like to declare some
+ predefined RuntimeClasses with specific properties, doing so is out-of-scope for this initial KEP.
+- [Pod Overhead][] - Although RuntimeClass is likely to be the configuration mechanism of choice,
+ the details of how pod resource overhead will be implemented is out of scope for this KEP.
+- Provide a mechanism to dynamically register or provision additional runtimes.
+- Requiring specific RuntimeClasses according to policy. This should be addressed by other
+ cluster-level policy mechanisms, such as PodSecurityPolicy.
+- "Fitting" a RuntimeClass to pod requirements - In other words, specifying runtime properties and
+ letting the system match an appropriate RuntimeClass, rather than explicitly assigning a
+ RuntimeClass by name. This approach can increase portability, but can be added seamlessly in a
+ future iteration.
+
+[Pod Overhead]: https://docs.google.com/document/d/1EJKT4gyl58-kzt2bnwkv08MIUZ6lkDpXcxkHqCvvAp4/edit
+
+### User Stories
+
+- As a cluster operator, I want to provide multiple runtime options to support a wide variety of
+ workloads. Examples include native linux containers, "sandboxed" containers, and windows
+ containers.
+- As a cluster operator, I want to provide stable rolling upgrades of runtimes. For
+ example, rolling out an update with backwards incompatible changes or previously unsupported
+ features.
+- As an application developer, I want to select the runtime that best fits my workload.
+- As an application developer, I don't want to study the nitty-gritty details of different runtime
+ implementations, but rather choose from pre-configured classes.
+- As an application developer, I want my application to be portable across clusters that use similar
+ but different variants of a "class" of runtimes.
+
+## Proposal
+
+The initial design includes:
+
+- `RuntimeClass` API resource definition
+- `RuntimeClass` pod field for specifying the RuntimeClass the pod should be run with
+- Kubelet implementation for fetching & interpreting the RuntimeClass
+- CRI API & implementation for passing along the [RuntimeHandler](#runtime-handler).
+
+### API
+
+`RuntimeClass` is a new cluster-scoped resource in the `node.k8s.io` API group.
+
+> _The `node.k8s.io` API group would eventually hold the Node resource when `core` is retired.
+> Alternatives considered: `runtime.k8s.io`, `cluster.k8s.io`_
+
+_(This is a simplified declaration, syntactic details will be covered in the API PR review)_
+
+```go
+type RuntimeClass struct {
+ metav1.TypeMeta
+ // ObjectMeta minimally includes the RuntimeClass name, which is used to reference the class.
+ // Namespace should be left blank.
+ metav1.ObjectMeta
+
+ Spec RuntimeClassSpec
+}
+
+type RuntimeClassSpec struct {
+ // RuntimeHandler specifies the underlying runtime the CRI calls to handle pod and/or container
+ // creation. The possible values are specific to a given configuration & CRI implementation.
+ // The empty string is equivalent to the default behavior.
+ // +optional
+ RuntimeHandler string
+}
+```
+
+The runtime is selected by the pod by specifying the RuntimeClass in the PodSpec. Once the pod is
+scheduled, the RuntimeClass cannot be changed.
+
+```go
+type PodSpec struct {
+ ...
+ // RuntimeClassName refers to a RuntimeClass object with the same name,
+ // which should be used to run this pod.
+ // +optional
+ RuntimeClassName string
+ ...
+}
+```
+
+The `legacy` RuntimeClass name is reserved. The legacy RuntimeClass is defined to be fully backwards
+compatible with current Kubernetes. This means that the legacy runtime does not specify any
+RuntimeHandler or perform any feature validation (all features are "supported").
+
+```go
+const (
+ // RuntimeClassNameLegacy is a reserved RuntimeClass name. The legacy
+ // RuntimeClass does not specify a runtime handler or perform any
+ // feature validation.
+ RuntimeClassNameLegacy = "legacy"
+)
+```
+
+An unspecified RuntimeClassName `""` is equivalent to the `legacy` RuntimeClass, though the field is
+not defaulted to `legacy` (to leave room for configurable defaults in a future update).
+
+#### Examples
+
+Suppose we operate a cluster that lets users choose between native runc containers, and gvisor and
+kata-container sandboxes. We might create the following runtime classes:
+
+```yaml
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1alpha1
+metadata:
+ name: native # equivalent to 'legacy' for now
+spec:
+ runtimeHandler: runc
+---
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1alpha1
+metadata:
+ name: gvisor
+spec:
+ runtimeHandler: gvisor
+----
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1alpha1
+metadata:
+ name: kata-containers
+spec:
+ runtimeHandler: kata-containers
+----
+# provides the default sandbox runtime when users don't care about which they're getting.
+kind: RuntimeClass
+apiVersion: node.k8s.io/v1alpha1
+metadata:
+ name: sandboxed
+spec:
+ runtimeHandler: gvisor
+```
+
+Then when a user creates a workload, they can choose the desired runtime class to use (or not, if
+they want the default).
+
+```yaml
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: sandboxed-nginx
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: sandboxed-nginx
+ template:
+ metadata:
+ labels:
+ app: sandboxed-nginx
+ spec:
+ runtimeClassName: sandboxed # <---- Reference the desired RuntimeClass
+ containers:
+ - name: nginx
+ image: nginx
+ ports:
+ - containerPort: 80
+ protocol: TCP
+```
+
+#### Runtime Handler
+
+The `RuntimeHandler` is passed to the CRI as part of the `RunPodSandboxRequest`:
+
+```proto
+message RunPodSandboxRequest {
+ // Configuration for creating a PodSandbox.
+ PodSandboxConfig config = 1;
+ // Named runtime configuration to use for this PodSandbox.
+ string RuntimeHandler = 2;
+}
+```
+
+The RuntimeHandler is provided as a mechanism for CRI implementations to select between different
+predetermined configurations. The initial use case is replacing the experimental pod annotations
+currently used for selecting a sandboxed runtime by various CRI implementations:
+
+| CRI Runtime | Pod Annotation |
+| ------------|-------------------------------------------------------------|
+| CRIO | io.kubernetes.cri-o.TrustedSandbox: "false" |
+| containerd | io.kubernetes.cri.untrusted-workload: "true" |
+| frakti | runtime.frakti.alpha.kubernetes.io/OSContainer: "true"<br>runtime.frakti.alpha.kubernetes.io/Unikernel: "true" |
+| windows | experimental.windows.kubernetes.io/isolation-type: "hyperv" |
+
+These implementations could stick with this scheme ("trusted" and "untrusted"), but the preferred
+approach is a non-binary one wherein arbitrary handlers can be configured with a name that can be
+matched against the specified RuntimeHandler. For example, containerd might have a configuration
+corresponding to a "kata-runtime" handler:
+
+```
+[plugins.cri.containerd.kata-runtime]
+ runtime_type = "io.containerd.runtime.v1.linux"
+ runtime_engine = "/opt/kata/bin/kata-runtime"
+ runtime_root = ""
+```
+
+This non-binary approach is more flexible: it can still map to a binary RuntimeClass selection
+(e.g. `sandboxed` or `untrusted` RuntimeClasses), but can also support multiple parallel sandbox
+types (e.g. `kata-containers` or `gvisor` RuntimeClasses).
+
+### Versioning, Updates, and Rollouts
+
+Getting upgrades and rollouts right is a very nuanced and complicated problem. For the initial alpha
+implementation, we will kick the can down the road by making the `RuntimeClassSpec` **immutable**,
+thereby requiring changes to be pushed as a newly named RuntimeClass instance. This means that pods
+must be updated to reference the new RuntimeClass, and comes with the advantage of native support
+for rolling updates through the same mechanisms as any other application update. The
+`RuntimeClassName` pod field is also immutable post scheduling.
+
+This conservative approach is preferred since it's much easier to relax constraints in a backwards
+compatible way than tighten them. We should revisit this decision prior to graduating RuntimeClass
+to beta.
+
+### Implementation Details
+
+The Kubelet uses an Informer to keep a local cache of all RuntimeClass objects. When a new pod is
+added, the Kubelet resolves the Pod's RuntimeClass against the local RuntimeClass cache. Once
+resolved, the RuntimeHandler field is passed to the CRI as part of the
+[`RunPodSandboxRequest`][]. At that point, the interpretation of the RuntimeHandler is left to the
+CRI implementation, but it should be cached if needed for subsequent calls.
+
+If the RuntimeClass cannot be resolved (e.g. doesn't exist) at Pod creation, then the request will
+be rejected in admission (controller to be detailed in a following update). If the RuntimeClass
+cannot be resolved by the Kubelet when `RunPodSandbox` should be called, then the Kubelet will fail
+the Pod. The admission check on a replica recreation will prevent the scheduler from thrashing. If
+the `RuntimeHandler` is not recognized by the CRI implementation, then `RunPodSandbox` will return
+an error.
+
+[RunPodSandboxRequest]: https://github.com/kubernetes/kubernetes/blob/b05a61e299777c2030fbcf27a396aff21b35f01b/pkg/kubelet/apis/cri/runtime/v1alpha2/api.proto#L344
+
+### Risks and Mitigations
+
+**Scope creep.** RuntimeClass has a fairly broad charter, but it should not become a default
+dumping ground for every new feature exposed by the node. For each feature, careful consideration
+should be made about whether it belongs on the Pod, Node, RuntimeClass, or some other resource. The
+[non-goals](#non-goals) should be kept in mind when considering RuntimeClass features.
+
+**Becoming a general policy mechanism.** RuntimeClass should not be used as a replacement for
+PodSecurityPolicy. The use cases for defining multiple RuntimeClasses for the same underlying
+runtime implementation should be extremely limited (generally only around updates & rollouts). To
+enforce this, no authorization or restrictions are placed directly on RuntimeClass use; in order to
+restrict a user to a specific RuntimeClass, you must use another policy mechanism such as
+PodSecurityPolicy.
+
+**Pushing complexity to the user.** RuntimeClass is a new resource in order to hide the complexity
+of runtime configuration from most users (aside from the cluster admin or provisioner). However, we
+are still side-stepping the issue of precisely defining specific types of runtimes like
+"Sandboxed". However, it is still up for debate whether precisely defining such runtime categories
+is even possible. RuntimeClass allows us to decouple this specification from the implementation, but
+it is still something I hope we can address in a future iteration through the concept of pre-defined
+or "conformant" RuntimeClasses.
+
+**Non-portability.** We are already in a world of non-portability for many features (see [examples
+of runtime variation](#examples-of-runtime-variation)). Future improvements to RuntimeClass can help
+address this issue by formally declaring supported features, or matching the runtime that supports a
+given workload automatically. Another issue is that pods need to refer to a RuntimeClass by name,
+which may not be defined in every cluster. This is something that can be addressed through
+pre-defined runtime classes (see previous risk), and/or by "fitting" pod requirements to compatible
+RuntimeClasses.
+
+## Graduation Criteria
+
+Alpha:
+
+- Everything described in the current proposal:
+ - Introduce the RuntimeClass API resource
+ - Add a RuntimeClassName field to the PodSpec
+ - Add a RuntimeHandler field to the CRI `RunPodSandboxRequest`
+ - Lookup the RuntimeClass for pods & plumb through the RuntimeHandler in the Kubelet (feature
+ gated)
+- RuntimeClass support in at least one CRI runtime & dockershim
+ - Runtime Handlers can be statically configured by the runtime, and referenced via RuntimeClass
+  - An error is reported when the handler is unknown or unsupported
+- Testing
+ - [CRI validation tests][cri-validation]
+ - Kubernetes E2E tests (only validating single runtime handler cases)
+
+[cri-validation]: https://github.com/kubernetes-incubator/cri-tools/blob/master/docs/validation.md
+
+Beta:
+
+- Most runtimes support RuntimeClass, and the current [untrusted annotations](#runtime-handler) are
+ deprecated.
+- RuntimeClasses are configured in the E2E environment with test coverage of a non-legacy RuntimeClass
+- The update & upgrade story is revisited, and a longer-term approach is implemented as necessary.
+- The cluster admin can choose which RuntimeClass is the default in a cluster.
+- Additional requirements TBD
+
+## Implementation History
+
+- 2018-06-11: SIG-Node decision to move forward with proposal
+- 2018-06-19: Initial KEP published.
+
+## Appendix
+
+### Examples of runtime variation
+
+- Linux Security Module (LSM) choice - Kubernetes supports both AppArmor & SELinux options on pods,
+ but those are mutually exclusive, and support of either is not required by the runtime. The
+ default configuration is also not well defined.
+- Seccomp-bpf - Kubernetes has alpha support for specifying a seccomp profile, but the default is
+ defined by the runtime, and support is not guaranteed.
+- Windows containers - isolation features are very OS-specific, and most of the current features are
+ limited to linux. As we build out Windows container support, we'll need to add windows-specific
+ features as well.
+- Host namespaces (Network,PID,IPC) may not be supported by virtualization-based runtimes
+ (e.g. Kata-containers & gVisor).
+- Per-pod and Per-container resource overhead varies by runtime.
+- Device support (e.g. GPUs) varies wildly by runtime & nodes.
+- Supported volume types varies by node - it remains TBD whether this information belongs in
+ RuntimeClass.
+- The list of default capabilities is defined in Docker, but not Kubernetes. Future runtimes may
+ have differing defaults, or support a subset of capabilities.
+- `Privileged` mode is not well defined, and thus may have differing implementations.
+- Support for resource over-commit and dynamic resource sizing (e.g. Burstable vs Guaranteed
+ workloads)
diff --git a/sig-api-machinery/README.md b/sig-api-machinery/README.md
index 33020486..74e50e13 100644
--- a/sig-api-machinery/README.md
+++ b/sig-api-machinery/README.md
@@ -107,15 +107,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-api-machinery-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-api-machinery-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-api-machinery-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-api-machinery-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-api-machinery-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-api-machinery-feature-requests) | Feature Requests |
-| @kubernetes/sig-api-machinery-misc | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-api-machinery-misc) | General Discussion |
-| @kubernetes/sig-api-machinery-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-api-machinery-pr-reviews) | PR Reviews |
-| @kubernetes/sig-api-machinery-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-api-machinery-proposals) | Design Proposals |
-| @kubernetes/sig-api-machinery-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-api-machinery-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-api-machinery-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-api-machinery-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-api-machinery-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-feature-requests) | Feature Requests |
+| @kubernetes/sig-api-machinery-misc | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-misc) | General Discussion |
+| @kubernetes/sig-api-machinery-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-pr-reviews) | PR Reviews |
+| @kubernetes/sig-api-machinery-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-proposals) | Design Proposals |
+| @kubernetes/sig-api-machinery-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-api-machinery-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Additional links
diff --git a/sig-apps/README.md b/sig-apps/README.md
index 84c3323d..7c27aebe 100644
--- a/sig-apps/README.md
+++ b/sig-apps/README.md
@@ -78,15 +78,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-apps-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-apps-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-apps-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-apps-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-apps-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-apps-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-apps-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-apps-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-apps-feature-requests) | Feature Requests |
-| @kubernetes/sig-apps-misc | [link](https://github.com/orgs/kubernetes/teams/sig-apps-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-apps-misc) | General Discussion |
-| @kubernetes/sig-apps-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-apps-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-apps-pr-reviews) | PR Reviews |
-| @kubernetes/sig-apps-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-apps-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-apps-proposals) | Design Proposals |
-| @kubernetes/sig-apps-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-apps-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-apps-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-apps-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-apps-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-apps-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-apps-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-apps-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-apps-feature-requests) | Feature Requests |
+| @kubernetes/sig-apps-misc | [link](https://github.com/orgs/kubernetes/teams/sig-apps-misc) | General Discussion |
+| @kubernetes/sig-apps-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-apps-pr-reviews) | PR Reviews |
+| @kubernetes/sig-apps-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-apps-proposals) | Design Proposals |
+| @kubernetes/sig-apps-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-apps-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-architecture/README.md b/sig-architecture/README.md
index d38ede92..3955ccb9 100644
--- a/sig-architecture/README.md
+++ b/sig-architecture/README.md
@@ -42,6 +42,9 @@ The following subprojects are owned by sig-architecture:
- **steering**
- Owners:
- https://raw.githubusercontent.com/kubernetes/steering/master/OWNERS
+- **architecture-tracking**
+ - Owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/architecture-tracking/master/OWNERS
## GitHub Teams
@@ -52,21 +55,28 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-architecture-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-architecture-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-architecture-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture-feature-requests) | Feature Requests |
-| @kubernetes/sig-architecture-misc-use-only-as-a-last-resort | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-misc-use-only-as-a-last-resort) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture-misc-use-only-as-a-last-resort) | General Discussion |
-| @kubernetes/sig-architecture-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture-pr-reviews) | PR Reviews |
-| @kubernetes/sig-architecture-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture-proposals) | Design Proposals |
-| @kubernetes/sig-architecture-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-architecture-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-architecture-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-architecture-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-feature-requests) | Feature Requests |
+| @kubernetes/sig-architecture-misc-use-only-as-a-last-resort | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-misc-use-only-as-a-last-resort) | General Discussion |
+| @kubernetes/sig-architecture-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-pr-reviews) | PR Reviews |
+| @kubernetes/sig-architecture-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-proposals) | Design Proposals |
+| @kubernetes/sig-architecture-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-architecture-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Additional materials
* [Charter](charter.md)
-* [Backlog](backlog.md)
+
+## Processes owned and tracked by the SIG
+
+[Architecture Tracking Repository](https://github.com/kubernetes-sigs/architecture-tracking/)
+
+* [API Reviews](https://github.com/kubernetes-sigs/architecture-tracking/projects/3)
+* [KEP Reviews](https://github.com/kubernetes-sigs/architecture-tracking/projects/2)
+* [Conformance Test Review](https://github.com/kubernetes-sigs/architecture-tracking/projects/1)
<!-- END CUSTOM CONTENT -->
diff --git a/sig-auth/README.md b/sig-auth/README.md
index 0ba6d2a1..c3b26dd1 100644
--- a/sig-auth/README.md
+++ b/sig-auth/README.md
@@ -10,6 +10,8 @@ To understand how this file is generated, see https://git.k8s.io/community/gener
Covers improvements to Kubernetes authorization, authentication, and cluster security policy.
+The [charter](charter.md) defines the scope and governance of the Auth Special Interest Group.
+
## Meetings
* Regular SIG Meeting: [Wednesdays at 11:00 PT (Pacific Time)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (biweekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=11:00&tz=PT%20%28Pacific%20Time%29).
* [Meeting notes and Agenda](https://docs.google.com/document/d/1woLGRoONE3EBVx-wTb4pvp4CI7tmLZ6lS26VTbosLKM/edit#).
@@ -44,15 +46,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-auth-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-auth-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-auth-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-auth-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-auth-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-auth-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-auth-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-auth-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-auth-feature-requests) | Feature Requests |
-| @kubernetes/sig-auth-misc | [link](https://github.com/orgs/kubernetes/teams/sig-auth-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-auth-misc) | General Discussion |
-| @kubernetes/sig-auth-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-auth-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-auth-pr-reviews) | PR Reviews |
-| @kubernetes/sig-auth-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-auth-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-auth-proposals) | Design Proposals |
-| @kubernetes/sig-auth-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-auth-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-auth-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-auth-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-auth-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-auth-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-auth-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-auth-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-auth-feature-requests) | Feature Requests |
+| @kubernetes/sig-auth-misc | [link](https://github.com/orgs/kubernetes/teams/sig-auth-misc) | General Discussion |
+| @kubernetes/sig-auth-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-auth-pr-reviews) | PR Reviews |
+| @kubernetes/sig-auth-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-auth-proposals) | Design Proposals |
+| @kubernetes/sig-auth-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-auth-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Goals
diff --git a/sig-auth/charter.md b/sig-auth/charter.md
new file mode 100644
index 00000000..541c854e
--- /dev/null
+++ b/sig-auth/charter.md
@@ -0,0 +1,69 @@
+# SIG Auth Charter
+
+This charter adheres to the conventions described in the [Kubernetes Charter README] and uses
+the Roles and Organization Management outlined in [sig-governance].
+
+## Scope
+
+SIG Auth is responsible for the design, implementation, and maintenance of features in
+Kubernetes that control and protect access to the API and other core components. This includes
+authentication and authorization, but also encompasses features like auditing and some security
+policy (see below).
+
+### In scope
+
+Link to SIG section in [sigs.yaml]
+
+#### Code, Binaries and Services
+
+- Kubernetes authentication, authorization, audit and security policy features. Examples
+ include:
+ - Authentication, authorization and audit interfaces and extension points
+ - Authentication implementations (service accounts, OIDC, authenticating proxy, webhook,
+ ...)
+ - Authorizer implementations (RBAC + default policy, Node + default policy, webhook, ...)
+ - Security-related admission plugins (NodeRestriction, ServiceAccount, PodSecurityPolicy,
+ ImagePolicy, etc)
+- The mechanisms to protect confidentiality/integrity of API data. Examples include:
+ - Capability for encryption at rest
+ - Capability for secure communication between components
+ - Ensuring users and components can operate with appropriately scoped permissions
+
+#### Cross-cutting and Externally Facing Processes
+
+- Consult with other SIGs and the community on how to apply mechanisms owned by SIG
+ Auth. Examples include:
+ - Review privilege escalation implications of feature and API designs
+ - Core component authentication & authorization (apiserver, kubelet, controller-manager,
+ and scheduler)
+ - Local-storage volume deployment authentication
+ - Cloud provider authorization policy
+ - Container runtime streaming (exec/attach/port-forward) authentication
+ - Best practices for hardening add-ons or other external integrations
+
+### Out of scope
+
+- Reporting of specific vulnerabilities in Kubernetes. Please report using these instructions:
+ https://kubernetes.io/security/
+- General security discussion. Examples of topics that are out of scope for SIG-auth include:
+ - Protection of volume data, container ephemeral data, and other non-API data (prefer: sig-storage
+ and sig-node)
+ - Container isolation (prefer: sig-node and sig-networking)
+ - Bug bounty (prefer: product security team)
+ - Resource quota (prefer: sig-scheduling)
+ - Resource availability / DOS protection (prefer: sig-apimachinery, sig-network, sig-node)
+
+## Roles and Organization Management
+
+This SIG adheres to the Roles and Organization Management outlined in [sig-governance]
+and opts-in to updates and modifications to [sig-governance].
+
+### Subproject Creation
+
+SIG Auth delegates subproject approval to Technical Leads. See [Subproject creation - Option 1].
+
+
+[sig-governance]: https://github.com/kubernetes/community/blob/master/committee-steering/governance/sig-governance.md
+[sigs.yaml]: https://github.com/kubernetes/community/blob/master/sigs.yaml#L250
+[Kubernetes Charter README]: https://github.com/kubernetes/community/blob/master/committee-steering/governance/README.md
+[Subproject creation - Option 1]: https://github.com/kubernetes/community/blob/master/committee-steering/governance/sig-governance.md#subproject-creation
diff --git a/sig-autoscaling/README.md b/sig-autoscaling/README.md
index 6adb05a8..b8d1c101 100644
--- a/sig-autoscaling/README.md
+++ b/sig-autoscaling/README.md
@@ -46,15 +46,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-autoscaling-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-autoscaling-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-autoscaling-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-autoscaling-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-autoscaling-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-autoscaling-feature-requests) | Feature Requests |
-| @kubernetes/sig-autoscaling-misc | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-autoscaling-misc) | General Discussion |
-| @kubernetes/sig-autoscaling-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-autoscaling-pr-reviews) | PR Reviews |
-| @kubernetes/sig-autoscaling-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-autoscaling-proposals) | Design Proposals |
-| @kubernetes/sig-autoscaling-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-autoscaling-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-autoscaling-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-autoscaling-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-autoscaling-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-feature-requests) | Feature Requests |
+| @kubernetes/sig-autoscaling-misc | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-misc) | General Discussion |
+| @kubernetes/sig-autoscaling-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-pr-reviews) | PR Reviews |
+| @kubernetes/sig-autoscaling-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-proposals) | Design Proposals |
+| @kubernetes/sig-autoscaling-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-autoscaling-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Concerns
diff --git a/sig-aws/README.md b/sig-aws/README.md
index d2639f09..3d573e35 100644
--- a/sig-aws/README.md
+++ b/sig-aws/README.md
@@ -47,9 +47,9 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-aws-misc | [link](https://github.com/orgs/kubernetes/teams/sig-aws-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-aws-misc) | General Discussion |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-aws-misc | [link](https://github.com/orgs/kubernetes/teams/sig-aws-misc) | General Discussion |
<!-- BEGIN CUSTOM CONTENT -->
## Participate
diff --git a/sig-azure/README.md b/sig-azure/README.md
index 232bf5d8..3124d607 100644
--- a/sig-azure/README.md
+++ b/sig-azure/README.md
@@ -30,7 +30,6 @@ The Technical Leads of the SIG establish new subprojects, decommission existing
subprojects, and resolve cross-subproject technical issues and decisions.
* Kal Khenidak (**[@khenidak](https://github.com/khenidak)**), Microsoft
-* Cole Mickens (**[@colemickens](https://github.com/colemickens)**), Red Hat
## Contact
* [Slack](https://kubernetes.slack.com/messages/sig-azure)
@@ -53,15 +52,9 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-azure-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-azure-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-azure-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-azure-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-azure-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-azure-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-azure-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-azure-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-azure-feature-requests) | Feature Requests |
-| @kubernetes/sig-azure-misc | [link](https://github.com/orgs/kubernetes/teams/sig-azure-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-azure-misc) | General Discussion |
-| @kubernetes/sig-azure-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-azure-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-azure-pr-reviews) | PR Reviews |
-| @kubernetes/sig-azure-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-azure-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-azure-proposals) | Design Proposals |
-| @kubernetes/sig-azure-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-azure-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-azure-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-azure | [link](https://github.com/orgs/kubernetes/teams/sig-azure) | General Discussion |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-big-data/README.md b/sig-big-data/README.md
index e09fca8e..36573c4e 100644
--- a/sig-big-data/README.md
+++ b/sig-big-data/README.md
@@ -38,15 +38,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-big-data-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-big-data-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-big-data-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data-feature-requests) | Feature Requests |
-| @kubernetes/sig-big-data-misc | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data-misc) | General Discussion |
-| @kubernetes/sig-big-data-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data-pr-reviews) | PR Reviews |
-| @kubernetes/sig-big-data-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data-proposals) | Design Proposals |
-| @kubernetes/sig-big-data-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-big-data-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-big-data-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-big-data-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-feature-requests) | Feature Requests |
+| @kubernetes/sig-big-data-misc | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-misc) | General Discussion |
+| @kubernetes/sig-big-data-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-pr-reviews) | PR Reviews |
+| @kubernetes/sig-big-data-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-proposals) | Design Proposals |
+| @kubernetes/sig-big-data-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-big-data-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Goals
diff --git a/sig-cli/README.md b/sig-cli/README.md
index 6ed0140f..075a2cc2 100644
--- a/sig-cli/README.md
+++ b/sig-cli/README.md
@@ -53,16 +53,16 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-cli-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cli-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-cli-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-cli-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-cli-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-cli-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-feature-requests) | Feature Requests |
-| @kubernetes/sig-cli-maintainers | [link](https://github.com/orgs/kubernetes/teams/sig-cli-maintainers) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-maintainers) | CLI Maintainers |
-| @kubernetes/sig-cli-misc | [link](https://github.com/orgs/kubernetes/teams/sig-cli-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-misc) | General Discussion |
-| @kubernetes/sig-cli-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cli-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-pr-reviews) | PR Reviews |
-| @kubernetes/sig-cli-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-cli-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-proposals) | Design Proposals |
-| @kubernetes/sig-cli-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-cli-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cli-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-cli-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cli-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-cli-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-cli-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-cli-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-cli-feature-requests) | Feature Requests |
+| @kubernetes/sig-cli-maintainers | [link](https://github.com/orgs/kubernetes/teams/sig-cli-maintainers) | CLI Maintainers |
+| @kubernetes/sig-cli-misc | [link](https://github.com/orgs/kubernetes/teams/sig-cli-misc) | General Discussion |
+| @kubernetes/sig-cli-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cli-pr-reviews) | PR Reviews |
+| @kubernetes/sig-cli-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-cli-proposals) | Design Proposals |
+| @kubernetes/sig-cli-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-cli-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-cloud-provider/README.md b/sig-cloud-provider/README.md
index a80ad156..d5db27e1 100644
--- a/sig-cloud-provider/README.md
+++ b/sig-cloud-provider/README.md
@@ -61,16 +61,16 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-cloud-provider-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-cloud-provider-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-cloud-provider-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider-feature-requests) | Feature Requests |
-| @kubernetes/sig-cloud-provider-maintainers | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-maintainers) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider-maintainers) | Cloud Providers Maintainers |
-| @kubernetes/sig-cloud-providers-misc | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-providers-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-providers-misc) | General Discussion |
-| @kubernetes/sig-cloud-provider-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider-pr-reviews) | PR Reviews |
-| @kubernetes/sig-cloud-provider-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider-proposals) | Design Proposals |
-| @kubernetes/sig-cloud-provider-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-cloud-provider-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-cloud-provider-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-cloud-provider-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-feature-requests) | Feature Requests |
+| @kubernetes/sig-cloud-provider-maintainers | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-maintainers) | Cloud Providers Maintainers |
+| @kubernetes/sig-cloud-providers-misc | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-providers-misc) | General Discussion |
+| @kubernetes/sig-cloud-provider-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-pr-reviews) | PR Reviews |
+| @kubernetes/sig-cloud-provider-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-proposals) | Design Proposals |
+| @kubernetes/sig-cloud-provider-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-cloud-provider-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-cluster-lifecycle/README.md b/sig-cluster-lifecycle/README.md
index a6ced7fc..9dd578ab 100644
--- a/sig-cluster-lifecycle/README.md
+++ b/sig-cluster-lifecycle/README.md
@@ -28,7 +28,6 @@ The Cluster Lifecycle SIG examines how we should change Kubernetes to make it ea
### Chairs
The Chairs of the SIG run operations and processes governing the SIG.
-* Luke Marsden (**[@lukemarsden](https://github.com/lukemarsden)**), Weave
* Robert Bailey (**[@roberthbailey](https://github.com/roberthbailey)**), Google
* Lucas Käldström (**[@luxas](https://github.com/luxas)**), Luxas Labs (occasionally contracting for Weaveworks)
* Timothy St. Clair (**[@timothysc](https://github.com/timothysc)**), Heptio
@@ -47,12 +46,12 @@ The following subprojects are owned by sig-cluster-lifecycle:
- **cluster-api**
- Owners:
- https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/master/OWNERS
-- **cluster-api-provider-gcp**
- - Owners:
- - https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-gcp/master/OWNERS
- **cluster-api-provider-aws**
- Owners:
- https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-aws/master/OWNERS
+- **cluster-api-provider-gcp**
+ - Owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-gcp/master/OWNERS
- **cluster-api-provider-openstack**
- Owners:
- https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-openstack/master/OWNERS
@@ -72,6 +71,9 @@ The following subprojects are owned by sig-cluster-lifecycle:
- Owners:
- https://raw.githubusercontent.com/kubernetes/kubeadm/master/OWNERS
- https://raw.githubusercontent.com/kubernetes/kubernetes/master/cmd/kubeadm/OWNERS
+- **kubeadm-dind-cluster**
+ - Owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/kubeadm-dind-cluster/master/OWNERS
- **kubernetes-anywhere**
- Owners:
- https://raw.githubusercontent.com/kubernetes/kubernetes-anywhere/master/OWNERS
@@ -91,15 +93,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-cluster-lifecycle-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-cluster-lifecycle-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-cluster-lifecycle-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle-feature-requests) | Feature Requests |
-| @kubernetes/sig-cluster-lifecycle-misc | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle-misc) | General Discussion |
-| @kubernetes/sig-cluster-lifecycle-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle-pr-reviews) | PR Reviews |
-| @kubernetes/sig-cluster-lifecycle-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle-proposals) | Design Proposals |
-| @kubernetes/sig-cluster-lifecycle-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-cluster-lifecycle-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-cluster-lifecycle-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-cluster-lifecycle-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-feature-requests) | Feature Requests |
+| @kubernetes/sig-cluster-lifecycle-misc | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-misc) | General Discussion |
+| @kubernetes/sig-cluster-lifecycle-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-pr-reviews) | PR Reviews |
+| @kubernetes/sig-cluster-lifecycle-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-proposals) | Design Proposals |
+| @kubernetes/sig-cluster-lifecycle-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-cluster-lifecycle-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-contributor-experience/README.md b/sig-contributor-experience/README.md
index 58df0445..7bb47f51 100644
--- a/sig-contributor-experience/README.md
+++ b/sig-contributor-experience/README.md
@@ -49,9 +49,11 @@ The following subprojects are owned by sig-contributor-experience:
- **github-management**
- Owners:
- https://raw.githubusercontent.com/kubernetes/community/master/github-management/OWNERS
-- **contributors-guide**
+ - https://raw.githubusercontent.com/kubernetes/org/master/OWNERS
+- **contributors-documentation**
- Owners:
- https://raw.githubusercontent.com/kubernetes/community/master/contributors/guide/OWNERS
+ - https://raw.githubusercontent.com/kubernetes-sigs/contributor-site/master/OWNERS
- **devstats**
- Owners:
- Phillels
@@ -74,14 +76,14 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-contributor-experience-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-contributor-experience-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-contributor-experience-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-contributor-experience-feature-requests) | Feature Requests |
-| @kubernetes/sig-contributor-experience-misc-use-only-as-a-last-resort | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-misc-use-only-as-a-last-resort) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-contributor-experience-misc-use-only-as-a-last-resort) | General Discussion |
-| @kubernetes/sig-contributor-experience-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-contributor-experience-pr-reviews) | PR Reviews |
-| @kubernetes/sig-contributor-experience-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-contributor-experience-proposals) | Design Proposals |
-| @kubernetes/sig-contributor-experience-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-contributor-experience-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-contributor-experience-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-contributor-experience-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-feature-requests) | Feature Requests |
+| @kubernetes/sig-contributor-experience-misc-use-only-as-a-last-resort | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-misc-use-only-as-a-last-resort) | General Discussion |
+| @kubernetes/sig-contributor-experience-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-pr-reviews) | PR Reviews |
+| @kubernetes/sig-contributor-experience-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-proposals) | Design Proposals |
+| @kubernetes/sig-contributor-experience-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-contributor-experience-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-docs/README.md b/sig-docs/README.md
index 460626a0..65c26ea0 100644
--- a/sig-docs/README.md
+++ b/sig-docs/README.md
@@ -35,12 +35,15 @@ The Chairs of the SIG run operations and processes governing the SIG.
## Subprojects
The following subprojects are owned by sig-docs:
-- **kubernetes-bootcamp**
+- **kubernetes-docs-ja**
- Owners:
- - https://raw.githubusercontent.com/kubernetes/kubernetes-bootcamp/master/OWNERS
-- **kubernetes-docs-cn**
+ - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-ja/master/OWNERS
+- **kubernetes-docs-ko**
- Owners:
- - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-cn/master/OWNERS
+ - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-ko/master/OWNERS
+- **kubernetes-docs-zh**
+ - Owners:
+ - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-zh/master/OWNERS
- **reference-docs**
- Owners:
- https://raw.githubusercontent.com/kubernetes-incubator/reference-docs/master/OWNERS
@@ -57,10 +60,10 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-docs-maintainers | [link](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-docs-maintainers) | Documentation Maintainers |
-| @kubernetes/sig-docs-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-docs-pr-reviews) | Documentation PR Reviewers |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-docs-maintainers | [link](https://github.com/orgs/kubernetes/teams/sig-docs-maintainers) | Documentation Maintainers |
+| @kubernetes/sig-docs-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-docs-pr-reviews) | Documentation PR Reviewers |
<!-- BEGIN CUSTOM CONTENT -->
## Goals
diff --git a/sig-gcp/README.md b/sig-gcp/README.md
index bf4ad643..f4804ab9 100644
--- a/sig-gcp/README.md
+++ b/sig-gcp/README.md
@@ -32,6 +32,12 @@ The following subprojects are owned by sig-gcp:
- **cloud-provider-gcp**
- Owners:
- https://raw.githubusercontent.com/kubernetes/cloud-provider-gcp/master/OWNERS
+- **gcp-compute-persistent-disk-csi-driver**
+ - Owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/master/OWNERS
+- **gcp-filestore-csi-driver**
+ - Owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/gcp-filestore-csi-driver/master/OWNERS
## GitHub Teams
@@ -42,15 +48,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-gcp-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-gcp-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-gcp-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp-feature-requests) | Feature Requests |
-| @kubernetes/sig-gcp-misc | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp-misc) | General Discussion |
-| @kubernetes/sig-gcp-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp-pr-reviews) | PR Reviews |
-| @kubernetes/sig-gcp-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp-proposals) | Design Proposals |
-| @kubernetes/sig-gcp-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-gcp-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-gcp-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-gcp-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-feature-requests) | Feature Requests |
+| @kubernetes/sig-gcp-misc | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-misc) | General Discussion |
+| @kubernetes/sig-gcp-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-pr-reviews) | PR Reviews |
+| @kubernetes/sig-gcp-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-proposals) | Design Proposals |
+| @kubernetes/sig-gcp-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-gcp-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-ibmcloud/OWNERS b/sig-ibmcloud/OWNERS
new file mode 100644
index 00000000..4d8ea924
--- /dev/null
+++ b/sig-ibmcloud/OWNERS
@@ -0,0 +1,6 @@
+reviewers:
+ - sig-ibmcloud-leads
+approvers:
+ - sig-ibmcloud-leads
+labels:
+ - sig/ibmcloud
diff --git a/sig-ibmcloud/README.md b/sig-ibmcloud/README.md
index 3b25ed28..ea4401f4 100644
--- a/sig-ibmcloud/README.md
+++ b/sig-ibmcloud/README.md
@@ -37,9 +37,9 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-ibmcloud-misc | [link](https://github.com/orgs/kubernetes/teams/sig-ibmcloud-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-ibmcloud-misc) | General Discussion |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-ibmcloud-misc | [link](https://github.com/orgs/kubernetes/teams/sig-ibmcloud-misc) | General Discussion |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-instrumentation/README.md b/sig-instrumentation/README.md
index 3b878591..7bf71dcf 100644
--- a/sig-instrumentation/README.md
+++ b/sig-instrumentation/README.md
@@ -52,15 +52,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-instrumentation-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-instrumentation-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-instrumentation-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation-feature-requests) | Feature Requests |
-| @kubernetes/sig-instrumentation-misc | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation-misc) | General Discussion |
-| @kubernetes/sig-instrumentation-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation-pr-reviews) | PR Reviews |
-| @kubernetes/sig-instrumentation-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation-proposals) | Design Proposals |
-| @kubernetes/sig-instrumentation-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-instrumentation-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-instrumentation-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-instrumentation-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-feature-requests) | Feature Requests |
+| @kubernetes/sig-instrumentation-misc | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-misc) | General Discussion |
+| @kubernetes/sig-instrumentation-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-pr-reviews) | PR Reviews |
+| @kubernetes/sig-instrumentation-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-proposals) | Design Proposals |
+| @kubernetes/sig-instrumentation-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-instrumentation-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-list.md b/sig-list.md
index 45d9e78f..0dc55c46 100644
--- a/sig-list.md
+++ b/sig-list.md
@@ -32,14 +32,14 @@ When the need arises, a [new SIG can be created](sig-creation-procedure.md)
|[Big Data](sig-big-data/README.md)|big-data|* [Anirudh Ramanathan](https://github.com/foxish), Rockset<br>* [Erik Erlandson](https://github.com/erikerlandson), Red Hat<br>* [Yinan Li](https://github.com/liyinan926), Google<br>|* [Slack](https://kubernetes.slack.com/messages/sig-big-data)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-big-data)|* Regular SIG Meeting: [Wednesdays at 17:00 UTC (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[CLI](sig-cli/README.md)|cli|* [Maciej Szulik](https://github.com/soltysh), Red Hat<br>* [Phillip Wittrock](https://github.com/pwittrock), Google<br>* [Tony Ado](https://github.com/AdoHe), Alibaba<br>|* [Slack](https://kubernetes.slack.com/messages/sig-cli)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-cli)|* Regular SIG Meeting: [Wednesdays at 09:00 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Cloud Provider](sig-cloud-provider/README.md)|cloud-provider|* [Andrew Sy Kim](https://github.com/andrewsykim), DigitalOcean<br>* [Chris Hoge](https://github.com/hogepodge), OpenStack Foundation<br>* [Jago Macleod](https://github.com/jagosan), Google<br>|* [Slack](https://kubernetes.slack.com/messages/sig-cloud-provider)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-cloud-provider)|* Regular SIG Meeting: [Wednesdays at 1:00 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
-|[Cluster Lifecycle](sig-cluster-lifecycle/README.md)|cluster-lifecycle|* [Luke Marsden](https://github.com/lukemarsden), Weave<br>* [Robert Bailey](https://github.com/roberthbailey), Google<br>* [Lucas Käldström](https://github.com/luxas), Luxas Labs (occasionally contracting for Weaveworks)<br>* [Timothy St. Clair](https://github.com/timothysc), Heptio<br>|* [Slack](https://kubernetes.slack.com/messages/sig-cluster-lifecycle)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle)|* Regular SIG Meeting: [Tuesdays at 09:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* kubeadm Office Hours: [Wednesdays at 09:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* Cluster API working group: [Wednesdays at 10:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* kops Office Hours: [Fridays at 09:00 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
+|[Cluster Lifecycle](sig-cluster-lifecycle/README.md)|cluster-lifecycle|* [Robert Bailey](https://github.com/roberthbailey), Google<br>* [Lucas Käldström](https://github.com/luxas), Luxas Labs (occasionally contracting for Weaveworks)<br>* [Timothy St. Clair](https://github.com/timothysc), Heptio<br>|* [Slack](https://kubernetes.slack.com/messages/sig-cluster-lifecycle)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle)|* Regular SIG Meeting: [Tuesdays at 09:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* kubeadm Office Hours: [Wednesdays at 09:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* Cluster API working group: [Wednesdays at 10:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* kops Office Hours: [Fridays at 09:00 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Cluster Ops](sig-cluster-ops/README.md)|cluster-ops|* [Rob Hirschfeld](https://github.com/zehicle), RackN<br>* [Jaice Singer DuMars](https://github.com/jdumars), Google<br>|* [Slack](https://kubernetes.slack.com/messages/sig-cluster-ops)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-ops)|* Regular SIG Meeting: [Thursdays at 20:00 UTC (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Contributor Experience](sig-contributor-experience/README.md)|contributor-experience|* [Elsie Phillips](https://github.com/Phillels), CoreOS<br>* [Paris Pittman](https://github.com/parispittman), Google<br>|* [Slack](https://kubernetes.slack.com/messages/sig-contribex)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-contribex)|* Regular SIG Meeting: [Wednesdays at 9:30 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Docs](sig-docs/README.md)|docs|* [Zach Corleissen](https://github.com/zacharysarah), Linux Foundation<br>* [Andrew Chen](https://github.com/chenopis), Google<br>* [Jared Bhatti](https://github.com/jaredbhatti), Google<br>|* [Slack](https://kubernetes.slack.com/messages/sig-docs)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-docs)|* Regular SIG Meeting: [Tuesdays at 17:30 UTC (weekly - except fourth Tuesday every month)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* APAC SIG Meeting: [Wednesdays at 02:00 UTC (monthly - fourth Wednesday every month)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[GCP](sig-gcp/README.md)|gcp|* [Adam Worrall](https://github.com/abgworrall), Google<br>|* [Slack](https://kubernetes.slack.com/messages/sig-gcp)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-gcp)|* Regular SIG Meeting: [Thursdays at 16:00 UTC (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[IBMCloud](sig-ibmcloud/README.md)|ibmcloud|* [Khalid Ahmed](https://github.com/khahmed), IBM<br>* [Richard Theis](https://github.com/rtheis), IBM<br>* [Sahdev Zala](https://github.com/spzala), IBM<br>|* [Slack](https://kubernetes.slack.com/messages/sig-ibmcloud)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-ibmcloud)|* Regular SIG Meeting: [Wednesdays at 14:00 EST (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Instrumentation](sig-instrumentation/README.md)|instrumentation|* [Piotr Szczesniak](https://github.com/piosz), Google<br>* [Frederic Branczyk](https://github.com/brancz), Red Hat<br>|* [Slack](https://kubernetes.slack.com/messages/sig-instrumentation)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-instrumentation)|* Regular SIG Meeting: [Thursdays at 17:30 UTC (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
-|[Multicluster](sig-multicluster/README.md)|multicluster|* [Christian Bell](https://github.com/csbell), Google<br>* [Quinton Hoole](https://github.com/quinton-hoole), Huawei<br>|* [Slack](https://kubernetes.slack.com/messages/sig-multicluster)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster)|* Regular SIG Meeting: [Tuesdays at 9:30 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* Federation v2 Working Group: [Wednesdays at 9:30 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
+|[Multicluster](sig-multicluster/README.md)|multicluster|* [Christian Bell](https://github.com/csbell), Google<br>* [Quinton Hoole](https://github.com/quinton-hoole), Huawei<br>|* [Slack](https://kubernetes.slack.com/messages/sig-multicluster)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster)|* Regular SIG Meeting: [Tuesdays at 9:30 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* Federation v2 Working Group: [Wednesdays at 7:30 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Network](sig-network/README.md)|network|* [Tim Hockin](https://github.com/thockin), Google<br>* [Dan Williams](https://github.com/dcbw), Red Hat<br>* [Casey Davenport](https://github.com/caseydavenport), Tigera<br>|* [Slack](https://kubernetes.slack.com/messages/sig-network)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-network)|* Regular SIG Meeting: [Thursdays at 14:00 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Node](sig-node/README.md)|node|* [Dawn Chen](https://github.com/dchen1107), Google<br>* [Derek Carr](https://github.com/derekwaynecarr), Red Hat<br>|* [Slack](https://kubernetes.slack.com/messages/sig-node)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-node)|* Regular SIG Meeting: [Tuesdays at 10:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[OpenStack](sig-openstack/README.md)|openstack|* [Chris Hoge](https://github.com/hogepodge), OpenStack Foundation<br>* [David Lyle](https://github.com/dklyle), Intel<br>* [Robert Morse](https://github.com/rjmorse), Ticketmaster<br>|* [Slack](https://kubernetes.slack.com/messages/sig-openstack)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack)|* Regular SIG Meeting: [Wednesdays at 16:00 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
@@ -51,7 +51,7 @@ When the need arises, a [new SIG can be created](sig-creation-procedure.md)
|[Storage](sig-storage/README.md)|storage|* [Saad Ali](https://github.com/saad-ali), Google<br>* [Bradley Childs](https://github.com/childsb), Red Hat<br>|* [Slack](https://kubernetes.slack.com/messages/sig-storage)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-storage)|* Regular SIG Meeting: [Thursdays at 9:00 PT (Pacific Time) (biweekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Testing](sig-testing/README.md)|testing|* [Aaron Crickenberger](https://github.com/spiffxp), Google<br>* [Erick Fejta](https://github.com/fejta), Google<br>* [Steve Kuznetsov](https://github.com/stevekuznetsov), Red Hat<br>* [Timothy St. Clair](https://github.com/timothysc), Heptio<br>|* [Slack](https://kubernetes.slack.com/messages/sig-testing)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-testing)|* Regular SIG Meeting: [Tuesdays at 13:00 PT (Pacific Time) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* (testing-commons) Testing Commons: [Wednesdays at 07:30 PT (Pacific Time) (bi-weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[UI](sig-ui/README.md)|ui|* [Dan Romlein](https://github.com/danielromlein), Google<br>* [Sebastian Florek](https://github.com/floreks), Fujitsu<br>|* [Slack](https://kubernetes.slack.com/messages/sig-ui)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-ui)|* Regular SIG Meeting: [Thursdays at 18:00 CET (Central European Time) (weekly)](https://groups.google.com/forum/#!forum/kubernetes-sig-ui)<br>
-|[VMware](sig-vmware/README.md)|vmware|* [Fabio Rapposelli](https://github.com/frapposelli), VMware<br>* [Steve Wong](https://github.com/cantbewong), VMware<br>|* [Slack](https://kubernetes.slack.com/messages/sig-vmware)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware)|* Regular SIG Meeting: [Thursdays at 18:00 UTC (bi-weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* Cloud Provider vSphere weekly syncup: [Wednesdays at 21:00 UTC (bi-weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
+|[VMware](sig-vmware/README.md)|vmware|* [Fabio Rapposelli](https://github.com/frapposelli), VMware<br>* [Steve Wong](https://github.com/cantbewong), VMware<br>|* [Slack](https://kubernetes.slack.com/messages/sig-vmware)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware)|* Regular SIG Meeting: [Thursdays at 18:00 UTC (bi-weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>* Cloud Provider vSphere weekly syncup: [Wednesdays at 16:00 UTC (bi-weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
|[Windows](sig-windows/README.md)|windows|* [Michael Michael](https://github.com/michmike), Apprenda<br>* [Patrick Lang](https://github.com/patricklang), Microsoft<br>|* [Slack](https://kubernetes.slack.com/messages/sig-windows)<br>* [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-windows)|* Regular SIG Meeting: [Tuesdays at 12:30 Eastern Standard Time (EST) (weekly)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit)<br>
### Master Working Group List
diff --git a/sig-multicluster/README.md b/sig-multicluster/README.md
index d69c13fe..2fb331a6 100644
--- a/sig-multicluster/README.md
+++ b/sig-multicluster/README.md
@@ -14,7 +14,7 @@ A Special Interest Group focused on solving common challenges related to the man
* Regular SIG Meeting: [Tuesdays at 9:30 PT (Pacific Time)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (biweekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=9:30&tz=PT%20%28Pacific%20Time%29).
* [Meeting notes and Agenda](https://docs.google.com/document/d/18mk62nOXE_MCSSnb4yJD_8UadtzJrYyJxFwbrgabHe8/edit).
* [Meeting recordings](https://www.youtube.com/watch?v=iWKC3FsNHWg&list=PL69nYSiGNLP0HqgyqTby6HlDEz7i1mb0-).
-* Federation v2 Working Group: [Wednesdays at 9:30 PT (Pacific Time)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (weekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=9:30&tz=PT%20%28Pacific%20Time%29).
+* Federation v2 Working Group: [Wednesdays at 7:30 PT (Pacific Time)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (weekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=7:30&tz=PT%20%28Pacific%20Time%29).
* [Meeting notes and Agenda](https://docs.google.com/document/d/1v-Kb1pUs3ww_x0MiKtgcyTXCAuZlbVlz4_A9wS3_HXY/edit).
* [Meeting recordings](https://www.youtube.com/playlist?list=PL69nYSiGNLP3iKP5EzMbtNT2zOZv6RCrX).
@@ -56,15 +56,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-multicluster-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-multicluster-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-multicluster-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster-feature-requests) | Feature Requests |
-| @kubernetes/sig-multicluster-misc | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster-misc) | General Discussion |
-| @kubernetes/sig-multicluster-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster-pr-reviews) | PR Reviews |
-| @kubernetes/sig-multicluster-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-multicluster-test-failures) | Test Failures and Triage |
-| @kubernetes/sig-mutlicluster-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-mutlicluster-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-mutlicluster-proposals) | Design Proposals |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-multicluster-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-multicluster-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-multicluster-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-feature-requests) | Feature Requests |
+| @kubernetes/sig-multicluster-misc | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-misc) | General Discussion |
+| @kubernetes/sig-multicluster-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-pr-reviews) | PR Reviews |
+| @kubernetes/sig-multicluster-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-multicluster-test-failures) | Test Failures and Triage |
+| @kubernetes/sig-mutlicluster-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-mutlicluster-proposals) | Design Proposals |
<!-- BEGIN CUSTOM CONTENT -->
## Subprojects
diff --git a/sig-network/README.md b/sig-network/README.md
index c1e15ce1..4956251e 100644
--- a/sig-network/README.md
+++ b/sig-network/README.md
@@ -64,15 +64,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-network-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-network-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-network-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-network-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-network-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-network-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-network-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-network-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-network-feature-requests) | Feature Requests |
-| @kubernetes/sig-network-misc | [link](https://github.com/orgs/kubernetes/teams/sig-network-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-network-misc) | General Discussion |
-| @kubernetes/sig-network-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-network-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-network-pr-reviews) | PR Reviews |
-| @kubernetes/sig-network-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-network-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-network-proposals) | Design Proposals |
-| @kubernetes/sig-network-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-network-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-network-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-network-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-network-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-network-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-network-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-network-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-network-feature-requests) | Feature Requests |
+| @kubernetes/sig-network-misc | [link](https://github.com/orgs/kubernetes/teams/sig-network-misc) | General Discussion |
+| @kubernetes/sig-network-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-network-pr-reviews) | PR Reviews |
+| @kubernetes/sig-network-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-network-proposals) | Design Proposals |
+| @kubernetes/sig-network-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-network-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Areas of Responsibility
diff --git a/sig-node/README.md b/sig-node/README.md
index cd43b6fc..e2d1266c 100644
--- a/sig-node/README.md
+++ b/sig-node/README.md
@@ -65,14 +65,14 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-node-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-node-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-node-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-node-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-node-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-node-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-node-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-node-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-node-feature-requests) | Feature Requests |
-| @kubernetes/sig-node-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-node-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-node-pr-reviews) | PR Reviews |
-| @kubernetes/sig-node-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-node-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-node-proposals) | Design Proposals |
-| @kubernetes/sig-node-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-node-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-node-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-node-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-node-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-node-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-node-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-node-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-node-feature-requests) | Feature Requests |
+| @kubernetes/sig-node-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-node-pr-reviews) | PR Reviews |
+| @kubernetes/sig-node-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-node-proposals) | Design Proposals |
+| @kubernetes/sig-node-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-node-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Goals
diff --git a/sig-openstack/README.md b/sig-openstack/README.md
index a86001fb..1db42bba 100644
--- a/sig-openstack/README.md
+++ b/sig-openstack/README.md
@@ -45,15 +45,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-openstack-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-openstack-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-openstack-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack-feature-requests) | Feature Requests |
-| @kubernetes/sig-openstack-misc | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack-misc) | General Discussion |
-| @kubernetes/sig-openstack-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack-pr-reviews) | PR Reviews |
-| @kubernetes/sig-openstack-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack-proposals) | Design Proposals |
-| @kubernetes/sig-openstack-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-openstack-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-openstack-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-openstack-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-openstack-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-feature-requests) | Feature Requests |
+| @kubernetes/sig-openstack-misc | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-misc) | General Discussion |
+| @kubernetes/sig-openstack-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-pr-reviews) | PR Reviews |
+| @kubernetes/sig-openstack-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-proposals) | Design Proposals |
+| @kubernetes/sig-openstack-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-openstack-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-release/README.md b/sig-release/README.md
index 20515f50..e5e8459d 100644
--- a/sig-release/README.md
+++ b/sig-release/README.md
@@ -51,17 +51,17 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-release-admins | [link](https://github.com/orgs/kubernetes/teams/sig-release-admins) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-admins) | Release Team Admins |
-| @kubernetes/sig-release-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-release-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-release-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-release-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-release-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-release-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-feature-requests) | Feature Requests |
-| @kubernetes/sig-release-members | [link](https://github.com/orgs/kubernetes/teams/sig-release-members) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-members) | Release Team Members |
-| @kubernetes/sig-release-misc | [link](https://github.com/orgs/kubernetes/teams/sig-release-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-misc) | General Discussion |
-| @kubernetes/sig-release-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-release-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-pr-reviews) | PR Reviews |
-| @kubernetes/sig-release-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-release-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-proposals) | Design Proposals |
-| @kubernetes/sig-release-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-release-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-release-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-release-admins | [link](https://github.com/orgs/kubernetes/teams/sig-release-admins) | Release Team Admins |
+| @kubernetes/sig-release-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-release-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-release-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-release-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-release-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-release-feature-requests) | Feature Requests |
+| @kubernetes/sig-release-members | [link](https://github.com/orgs/kubernetes/teams/sig-release-members) | Release Team Members |
+| @kubernetes/sig-release-misc | [link](https://github.com/orgs/kubernetes/teams/sig-release-misc) | General Discussion |
+| @kubernetes/sig-release-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-release-pr-reviews) | PR Reviews |
+| @kubernetes/sig-release-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-release-proposals) | Design Proposals |
+| @kubernetes/sig-release-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-release-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
[SIG Release][] has moved!
diff --git a/sig-scalability/README.md b/sig-scalability/README.md
index e12ccb0b..a3f78155 100644
--- a/sig-scalability/README.md
+++ b/sig-scalability/README.md
@@ -50,15 +50,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-scalability-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scalability-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-scalability-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scalability-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-scalability-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scalability-feature-requests) | Feature Requests |
-| @kubernetes/sig-scalability-misc | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scalability-misc) | General Discussion |
-| @kubernetes/sig-scalability-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scalability-pr-reviews) | PR Reviews |
-| @kubernetes/sig-scalability-proprosals | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-proprosals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scalability-proprosals) | Design Proposals |
-| @kubernetes/sig-scalability-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scalability-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-scalability-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-scalability-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-scalability-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-feature-requests) | Feature Requests |
+| @kubernetes/sig-scalability-misc | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-misc) | General Discussion |
+| @kubernetes/sig-scalability-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-pr-reviews) | PR Reviews |
+| @kubernetes/sig-scalability-proprosals | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-proprosals) | Design Proposals |
+| @kubernetes/sig-scalability-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-scalability-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
## Upcoming 2018 Meeting Dates
diff --git a/sig-scheduling/README.md b/sig-scheduling/README.md
index 76bb6f84..d5dd1d8a 100644
--- a/sig-scheduling/README.md
+++ b/sig-scheduling/README.md
@@ -57,15 +57,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-scheduling-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-scheduling-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-scheduling-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling-feature-requests) | Feature Requests |
-| @kubernetes/sig-scheduling-misc | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling-misc) | General Discussion |
-| @kubernetes/sig-scheduling-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling-pr-reviews) | PR Reviews |
-| @kubernetes/sig-scheduling-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling-proposals) | Design Proposals |
-| @kubernetes/sig-scheduling-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-scheduling-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-scheduling-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-scheduling-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-scheduling-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-feature-requests) | Feature Requests |
+| @kubernetes/sig-scheduling-misc | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-misc) | General Discussion |
+| @kubernetes/sig-scheduling-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-pr-reviews) | PR Reviews |
+| @kubernetes/sig-scheduling-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-proposals) | Design Proposals |
+| @kubernetes/sig-scheduling-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-scheduling-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-service-catalog/README.md b/sig-service-catalog/README.md
index b4c83e85..4bf99386 100644
--- a/sig-service-catalog/README.md
+++ b/sig-service-catalog/README.md
@@ -8,7 +8,9 @@ To understand how this file is generated, see https://git.k8s.io/community/gener
--->
# Service Catalog Special Interest Group
-To develop a Kubernetes API for the CNCF service broker and Kubernetes broker implementation.
+Service Catalog is a Kubernetes extension project that implements the [Open Service Broker API](https://www.openservicebrokerapi.org/) (OSBAPI). It allows application developers to provision and consume cloud services natively from within Kubernetes.
+
+The [charter](https://github.com/kubernetes/community/blob/master/sig-service-catalog/charter.md) defines the scope and governance of the Service Catalog Special Interest Group.
## Meetings
* Regular SIG Meeting: [Mondays at 13:00 PT (Pacific Time)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (weekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=13:00&tz=PT%20%28Pacific%20Time%29).
@@ -46,15 +48,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-service-catalog-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-service-catalog-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-service-catalog-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-service-catalog-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-service-catalog-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-service-catalog-feature-requests) | Feature Requests |
-| @kubernetes/sig-service-catalog-misc | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-service-catalog-misc) | General Discussion |
-| @kubernetes/sig-service-catalog-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-service-catalog-pr-reviews) | PR Reviews |
-| @kubernetes/sig-service-catalog-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-service-catalog-proposals) | Design Proposals |
-| @kubernetes/sig-service-catalog-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-service-catalog-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-service-catalog-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-service-catalog-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-service-catalog-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-feature-requests) | Feature Requests |
+| @kubernetes/sig-service-catalog-misc | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-misc) | General Discussion |
+| @kubernetes/sig-service-catalog-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-pr-reviews) | PR Reviews |
+| @kubernetes/sig-service-catalog-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-proposals) | Design Proposals |
+| @kubernetes/sig-service-catalog-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-service-catalog-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-service-catalog/charter.md b/sig-service-catalog/charter.md
new file mode 100644
index 00000000..32c2d497
--- /dev/null
+++ b/sig-service-catalog/charter.md
@@ -0,0 +1,170 @@
+# SIG Service Catalog Charter
+
+This charter adheres to the conventions described in the [Kubernetes Charter
+README](https://github.com/kubernetes/community/blob/master/committee-steering/governance/README.md).
+
+## Scope
+
+Service Catalog is a Kubernetes extension project that implements the [Open
+Service Broker API](https://www.openservicebrokerapi.org/) (OSBAPI). It enables
+application developers to provision cloud services from within Kubernetes and
+integrates configuration and discovery of those services into Kubernetes
+resources.
+
+### In scope
+
+See the [service-catalog SIG entry](https://github.com/kubernetes/community/tree/master/sig-service-catalog).
+
+This SIG’s main goals are:
+- Support, and adhere to, the Platform requirements of the [OSBAPI
+ specification](https://github.com/openservicebrokerapi/servicebroker/blob/master/spec.md).
+- Provide a UX for Kubernetes users that is consistent with both the OSB API
+ specification and traditional Kubernetes user interactions.
+- Align with the OSBAPI specification as changes are made.
+- Provide feedback (bugs or feature requests) to the [OSBAPI WG](https://www.openservicebrokerapi.org/).
+
+### Code, Binaries and services
+
+- [Source Repository](https://github.com/kubernetes-incubator/service-catalog)
+ - See [OWNERS](https://raw.githubusercontent.com/kubernetes-incubator/service-catalog/master/OWNERS) for who has access.
+- [Image Repository](https://quay.io/organization/kubernetes-service-catalog)
+ - Canary builds are published on pushes to master.
+ - Release builds (and latest) are published on tags.
+ - Chairs have access to manage this repository.
+- [Helm Repository](https://svc-catalog-charts.storage.googleapis.com)
+ - Charts are manually published after each release.
+ - Managed by Vic Iglesias (Google), @viglesias on the kubernetes slack.
+- [svc-cat.io](https://svc-cat.io)
+ - Published on pushes to master.
+ - Site hosted with [Netlify](https://app.netlify.com/sites/svc-cat/overview).
+ - Chairs and interested maintainers have access to manage this site.
+- [CLI Binary Hosting](https://svc-cat.io/docs/install/#manual)
+ - Canary builds are published on pushes to master.
+ - Release builds (and latest) are published on tags.
+ - Files hosted on Azure blob storage.
+ - Azure account managed by Carolyn Van Slyck (Microsoft) and Aaron Schlesinger
+ (Microsoft).
+- [Travis](https://travis-ci.org/kubernetes-incubator/service-catalog)
+ - Runs the CI builds.
+ - Maintainers have access.
+- [Jenkins](https://service-catalog-jenkins.appspot.com/)
+ - Runs end-to-end tests on a live cluster.
+ - Server managed by Michael Kibbe (Google).
+
+### Out of scope
+
+The following, non-exhaustive, items are out of scope:
+- Operation of OSBAPI Service Brokers.
+
+## Roles
+This SIG's charter deviates from the
+[sig-governance](https://github.com/kubernetes/community/blob/master/committee-steering/governance/sig-governance.md)
+roles. We do not have the Tech Lead role, and have an honorary Emeritus Chair role.
+
+- [Maintainers](https://github.com/orgs/kubernetes-incubator/teams/maintainers-service-catalog/members)
+ - Maintainer is equivalent to the standard [Kubernetes definition of
+ Approver](https://github.com/kubernetes/community/blob/master/community-membership.md#approver).
+ - Responsible for reviewing pull requests, and approving pull requests for merge.
+ - Responsible for technical planning and stewardship of the project.
+ - New maintainers may be nominated by another maintainer, and accepted via lazy
+ two-thirds resolution amongst the chairs.
+ - Maintainers may be nominated for removal from their position by a chair,
+ and accepted via lazy two-thirds resolution amongst the chairs.
+ - Maintainers may propose changes to this charter at any time, by submitting a
+ pull request and then notifying the SIG mailing list, to be accepted via
+ lazy two-thirds resolution amongst the chairs.
+
+- Chairs
+ - Chairs are expected to perform the role of maintainer, in addition to their chair responsibilities.
+ - Chairs are listed in our [SIG
+ definition](https://github.com/kubernetes/community/tree/master/sig-service-catalog#chairs).
+  - Responsible for project administration activities within the SIG that are
+    non-technical in nature, such as organizing the weekly meetings.
+ - A chair does not have more rights, or votes, than a maintainer.
+ - Responsible for reporting the SIG’s status to the appropriate Kubernetes
+ leadership teams.
+ - All decisions amongst chairs are made using lazy consensus with a fallback
+ to a 2/3 majority vote (lazy two-thirds resolution).
+ This process is used for all decisions, such as changing chairs/maintainers
+ or modifying this charter.
+ - Chairs may nominate a new chair at any time, to be accepted via lazy
+ two-thirds resolution amongst the chairs.
+ - Chairs may decide to step down at any time.
+ - Chairs must remain active in the role and may be removed from the position
+ via lazy two-thirds resolution amongst the chairs, if they are unresponsive
+ for > 3 months or are not proactively working with other chairs to fulfill
+ responsibilities.
+ - There is no set number of Chairs.
+
+- Emeritus Chairs ([Inspired by the Helm
+ Project](http://technosophos.com/2018/01/11/introducing-helm-emeritus-core-maintainers.html))
+ - A chair who steps down may choose to take an Emeritus Chair title. This
+ confers honor on the recipient and allows them to remain associated with the
+ SIG in acknowledgement of their significant contributions.
+ - Those who attain this title are no longer expected to attend the weekly
+ meetings, share in the issue queue triage rotation, vote on policy or
+ architectural issues, or review pull requests.
+ - They are [listed in our documentation](https://svc-cat.io/community/#leadership)
+ as Emeritus Chairs, and we will continue to invite them to participate in
+ related events, such as KubeCon.
+
+- Security Contacts
+ - Are a contact point for the Product Security Team to reach out to for
+ triaging and handling of incoming issues.
+ - Must be a maintainer.
+ - Must accept and adhere to the Kubernetes [Embargo
+ Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy).
+ - Defined in
+ [SECURITY_CONTACTS](https://github.com/kubernetes-incubator/service-catalog/blob/master/SECURITY_CONTACTS)
+ file.
+
+## Organizational management
+
+- SIG meets every week on Zoom at 1 PM PST on Mondays
+ - Agenda
+ [here](https://docs.google.com/document/d/17xlpkoEbPR5M6P5VDzNx17q6-IPFxKyebEekCGYiIKM/edit#).
+ - Anyone is free to add new agenda items to the doc.
+ - Recordings of the calls are made available [here](https://goo.gl/ZmLNX9).
+- SIG members explicitly representing the group at conferences (SIG progress
+ reports, deep dives, etc.) should make their slides available for perusal and
+  feedback at least 2 weeks in advance.
+- [Working
+ groups](https://github.com/kubernetes-incubator/service-catalog/wiki/Working-Groups)
+ can be initiated by any member. To create a new one, add the topic to the
+ weekly call’s agenda for discussion.
+ - These are not the same as cross-SIG working groups.
+ - Working groups exist for an unspecified period of time, so that interested
+ members can meet to discuss and solve problems for our SIG.
+
+### Project management
+- [Milestones](https://github.com/kubernetes-incubator/service-catalog/milestones)
+ are defined by SIG maintainers.
+- Anyone is free to request a discussion of the milestones/plans during
+ a weekly call.
+- Weekly releases are typically done on Thursdays, and any member who has
+ maintainer rights is free to initiate it. _Friday releases are strongly
+ discouraged._
+- Major releases are planned and discussed among the SIG members during regular
+ weekly calls.
+- The release process is defined
+ [here](https://github.com/kubernetes-incubator/service-catalog/wiki/Release-Process).
+- Anyone can request to work on an issue by commenting on it with `#dibs`.
+
+
+### Technical processes
+- All technical decisions are made either through issues, pull requests or
+ discussions during the weekly SIG meeting. Major decisions should be
+ documented in an issue or pull request.
+- There is no requirement that all pull requests have an associated issue.
+ However, if the PR represents a significant design decision then it is
+ recommended that it be discussed among the group to avoid unnecessary coding
+ that might not be accepted.
+- While everyone is encouraged to contribute to the discussion of any topic,
+ ultimately any changes made to the codebase must be approved by the
+ maintainers.
+- Pull requests are required to have at least 2 maintainers approve them.
+- Pull requests that are labeled with `do-not-merge/hold` or have an on-going
+ discussion must not be merged until all concerns are addressed.
+- Disagreements are resolved via lazy consensus. In the event that a common
+ decision cannot be made, then a vote among the maintainers will be taken.
+ Simple majority (>50%) wins.
diff --git a/sig-storage/README.md b/sig-storage/README.md
index a51db64e..fdef6705 100644
--- a/sig-storage/README.md
+++ b/sig-storage/README.md
@@ -53,15 +53,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-storage-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-storage-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-storage-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-storage-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-storage-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-storage-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-storage-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-storage-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-storage-feature-requests) | Feature Requests |
-| @kubernetes/sig-storage-misc | [link](https://github.com/orgs/kubernetes/teams/sig-storage-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-storage-misc) | General Discussion |
-| @kubernetes/sig-storage-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-storage-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-storage-pr-reviews) | PR Reviews |
-| @kubernetes/sig-storage-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-storage-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-storage-proposals) | Design Proposals |
-| @kubernetes/sig-storage-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-storage-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-storage-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-storage-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-storage-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-storage-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-storage-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-storage-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-storage-feature-requests) | Feature Requests |
+| @kubernetes/sig-storage-misc | [link](https://github.com/orgs/kubernetes/teams/sig-storage-misc) | General Discussion |
+| @kubernetes/sig-storage-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-storage-pr-reviews) | PR Reviews |
+| @kubernetes/sig-storage-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-storage-proposals) | Design Proposals |
+| @kubernetes/sig-storage-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-storage-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-testing/README.md b/sig-testing/README.md
index c19f40d4..efea5e1f 100644
--- a/sig-testing/README.md
+++ b/sig-testing/README.md
@@ -42,7 +42,7 @@ The following subprojects are owned by sig-testing:
- **testing-commons**
- Description: The Testing Commons is a subproject within the Kubernetes sig-testing community interested code structure, layout, and execution of common test code used throughout the kubernetes project.
- Owners:
- - https://raw.githubusercontent.com/kubernetes-sig-testing/frameworks/master/OWNERS
+ - https://raw.githubusercontent.com/kubernetes-sigs/testing_frameworks/master/OWNERS
- https://raw.githubusercontent.com/kubernetes/kubernetes/master/test/OWNERS
- Meetings:
- Testing Commons: [Wednesdays at 07:30 PT (Pacific Time)](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (bi-weekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=07:30&tz=PT%20%28Pacific%20Time%29).
@@ -57,15 +57,15 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-testing-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-testing-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-testing-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-testing-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-testing-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-testing-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-testing-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-testing-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-testing-feature-requests) | Feature Requests |
-| @kubernetes/sig-testing-misc | [link](https://github.com/orgs/kubernetes/teams/sig-testing-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-testing-misc) | General Discussion |
-| @kubernetes/sig-testing-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-testing-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-testing-pr-reviews) | PR Reviews |
-| @kubernetes/sig-testing-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-testing-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-testing-proposals) | Design Proposals |
-| @kubernetes/sig-testing-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-testing-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-testing-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-testing-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-testing-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-testing-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-testing-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-testing-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-testing-feature-requests) | Feature Requests |
+| @kubernetes/sig-testing-misc | [link](https://github.com/orgs/kubernetes/teams/sig-testing-misc) | General Discussion |
+| @kubernetes/sig-testing-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-testing-pr-reviews) | PR Reviews |
+| @kubernetes/sig-testing-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-testing-proposals) | Design Proposals |
+| @kubernetes/sig-testing-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-testing-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
diff --git a/sig-vmware/README.md b/sig-vmware/README.md
index e706a71a..26562e2d 100644
--- a/sig-vmware/README.md
+++ b/sig-vmware/README.md
@@ -14,7 +14,7 @@ Bring together members of the VMware and Kubernetes community to maintain, suppo
* Regular SIG Meeting: [Thursdays at 18:00 UTC](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (bi-weekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=18:00&tz=UTC).
* [Meeting notes and Agenda](https://docs.google.com/document/d/1RV0nVtlPoAtM0DQwNYxYCC9lHfiHpTNatyv4bek6XtA/edit?usp=sharing).
* [Meeting recordings](https://www.youtube.com/playlist?list=PLutJyDdkKQIqKv-Zq8WbyibQtemChor9y).
-* Cloud Provider vSphere weekly syncup: [Wednesdays at 21:00 UTC](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (bi-weekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=21:00&tz=UTC).
+* Cloud Provider vSphere weekly syncup: [Wednesdays at 16:00 UTC](https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit) (bi-weekly). [Convert to your timezone](http://www.thetimezoneconverter.com/?t=16:00&tz=UTC).
* [Meeting notes and Agenda](https://docs.google.com/document/d/1B0NmmKVh8Ea5hnNsbUsJC7ZyNCsq_6NXl5hRdcHlJgY/edit?usp=sharing).
* [Meeting recordings](https://www.youtube.com/playlist?list=PLutJyDdkKQIpOT4bOfuO3MEMHvU1tRqyR).
@@ -34,9 +34,9 @@ The Chairs of the SIG run operations and processes governing the SIG.
## Subprojects
The following subprojects are owned by sig-vmware:
-- **cloud-provider-vsphere**
+- **cluster-api-provider-vsphere**
- Owners:
- - https://raw.githubusercontent.com/kubernetes/cloud-provider-vsphere/master/OWNERS
+ - https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-vsphere/master/OWNERS
## GitHub Teams
@@ -47,17 +47,20 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-vmware-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-api-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-api-reviews) | API Changes and Reviews |
-| @kubernetes/sig-vmware-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-vmware-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-feature-requests) | Feature Requests |
-| @kubernetes/sig-vmware-members | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-members) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-members) | Release Team Members |
-| @kubernetes/sig-vmware-misc | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-misc) | General Discussion |
-| @kubernetes/sig-vmware-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-pr-reviews) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-pr-reviews) | PR Reviews |
-| @kubernetes/sig-vmware-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-proposals) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-proposals) | Design Proposals |
-| @kubernetes/sig-vmware-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-test-failures) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-vmware-test-failures) | Test Failures and Triage |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-vmware-api-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-api-reviews) | API Changes and Reviews |
+| @kubernetes/sig-vmware-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-vmware-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-feature-requests) | Feature Requests |
+| @kubernetes/sig-vmware-members | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-members) | Release Team Members |
+| @kubernetes/sig-vmware-misc | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-misc) | General Discussion |
+| @kubernetes/sig-vmware-pr-reviews | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-pr-reviews) | PR Reviews |
+| @kubernetes/sig-vmware-proposals | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-proposals) | Design Proposals |
+| @kubernetes/sig-vmware-test-failures | [link](https://github.com/orgs/kubernetes/teams/sig-vmware-test-failures) | Test Failures and Triage |
<!-- BEGIN CUSTOM CONTENT -->
+## About the cloud-provider-vsphere subproject
+The `cloud-provider-vsphere` subproject is now hosted under the new [SIG Cloud Provider](https://github.com/kubernetes/community/blob/master/sig-cloud-provider/README.md).
+
<!-- END CUSTOM CONTENT -->
diff --git a/sig-windows/README.md b/sig-windows/README.md
index 58219c9b..d4092c55 100644
--- a/sig-windows/README.md
+++ b/sig-windows/README.md
@@ -37,11 +37,11 @@ The google groups contain the archive of Github team notifications.
Mentioning a team on Github will CC its group.
Monitor these for Github activity if you are not a member of the team.
-| Team Name | Details | Google Groups | Description |
-| --------- |:-------:|:-------------:| ----------- |
-| @kubernetes/sig-windows-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-windows-bugs) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-windows-bugs) | Bug Triage and Troubleshooting |
-| @kubernetes/sig-windows-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-windows-feature-requests) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-windows-feature-requests) | Feature Requests |
-| @kubernetes/sig-windows-misc | [link](https://github.com/orgs/kubernetes/teams/sig-windows-misc) | [link](https://groups.google.com/forum/#!forum/kubernetes-sig-windows-misc) | General Discussion |
+| Team Name | Details | Description |
+| --------- |:-------:| ----------- |
+| @kubernetes/sig-windows-bugs | [link](https://github.com/orgs/kubernetes/teams/sig-windows-bugs) | Bug Triage and Troubleshooting |
+| @kubernetes/sig-windows-feature-requests | [link](https://github.com/orgs/kubernetes/teams/sig-windows-feature-requests) | Feature Requests |
+| @kubernetes/sig-windows-misc | [link](https://github.com/orgs/kubernetes/teams/sig-windows-misc) | General Discussion |
<!-- BEGIN CUSTOM CONTENT -->
## Getting Started
diff --git a/sigs.yaml b/sigs.yaml
index 1145bedf..b10a85ee 100644
--- a/sigs.yaml
+++ b/sigs.yaml
@@ -247,12 +247,15 @@ sigs:
- name: steering
owners:
- https://raw.githubusercontent.com/kubernetes/steering/master/OWNERS
+ - name: architecture-tracking
+ owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/architecture-tracking/master/OWNERS
- name: Auth
dir: sig-auth
mission_statement: >
Covers improvements to Kubernetes authorization, authentication, and
cluster security policy.
- charter_link:
+ charter_link: charter.md
label: auth
leadership:
chairs:
@@ -407,9 +410,6 @@ sigs:
- name: Kal Khenidak
github: khenidak
company: Microsoft
- - name: Cole Mickens
- github: colemickens
- company: Red Hat
meetings:
- description: Regular SIG Meeting
day: Wednesday
@@ -423,20 +423,8 @@ sigs:
slack: sig-azure
mailing_list: https://groups.google.com/forum/#!forum/kubernetes-sig-azure
teams:
- - name: sig-azure-api-reviews
- description: API Changes and Reviews
- - name: sig-azure-bugs
- description: Bug Triage and Troubleshooting
- - name: sig-azure-feature-requests
- description: Feature Requests
- - name: sig-azure-misc
+ - name: sig-azure
description: General Discussion
- - name: sig-azure-pr-reviews
- description: PR Reviews
- - name: sig-azure-proposals
- description: Design Proposals
- - name: sig-azure-test-failures
- description: Test Failures and Triage
subprojects:
- name: cloud-provider-azure
owners:
@@ -626,9 +614,6 @@ sigs:
label: cluster-lifecycle
leadership:
chairs:
- - name: Luke Marsden
- github: lukemarsden
- company: Weave
- name: Robert Bailey
github: roberthbailey
company: Google
@@ -695,12 +680,12 @@ sigs:
- name: cluster-api
owners:
- https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/master/OWNERS
- - name: cluster-api-provider-gcp
- owners:
- - https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-gcp/master/OWNERS
- name: cluster-api-provider-aws
owners:
- https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-aws/master/OWNERS
+ - name: cluster-api-provider-gcp
+ owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-gcp/master/OWNERS
- name: cluster-api-provider-openstack
owners:
- https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-openstack/master/OWNERS
@@ -720,6 +705,9 @@ sigs:
owners:
- https://raw.githubusercontent.com/kubernetes/kubeadm/master/OWNERS
- https://raw.githubusercontent.com/kubernetes/kubernetes/master/cmd/kubeadm/OWNERS
+ - name: kubeadm-dind-cluster
+ owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/kubeadm-dind-cluster/master/OWNERS
- name: kubernetes-anywhere
owners:
- https://raw.githubusercontent.com/kubernetes/kubernetes-anywhere/master/OWNERS
@@ -820,9 +808,11 @@ sigs:
- name: github-management
owners:
- https://raw.githubusercontent.com/kubernetes/community/master/github-management/OWNERS
- - name: contributors-guide
+ - https://raw.githubusercontent.com/kubernetes/org/master/OWNERS
+ - name: contributors-documentation
owners:
- https://raw.githubusercontent.com/kubernetes/community/master/contributors/guide/OWNERS
+ - https://raw.githubusercontent.com/kubernetes-sigs/contributor-site/master/OWNERS
- name: devstats
owners:
- Phillels
@@ -878,12 +868,15 @@ sigs:
- name: sig-docs-pr-reviews
description: Documentation PR Reviewers
subprojects:
- - name: kubernetes-bootcamp
+ - name: kubernetes-docs-ja
owners:
- - https://raw.githubusercontent.com/kubernetes/kubernetes-bootcamp/master/OWNERS
- - name: kubernetes-docs-cn
+ - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-ja/master/OWNERS
+ - name: kubernetes-docs-ko
owners:
- - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-cn/master/OWNERS
+ - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-ko/master/OWNERS
+ - name: kubernetes-docs-zh
+ owners:
+ - https://raw.githubusercontent.com/kubernetes/kubernetes-docs-zh/master/OWNERS
- name: reference-docs
owners:
- https://raw.githubusercontent.com/kubernetes-incubator/reference-docs/master/OWNERS
@@ -932,6 +925,12 @@ sigs:
- name: cloud-provider-gcp
owners:
- https://raw.githubusercontent.com/kubernetes/cloud-provider-gcp/master/OWNERS
+ - name: gcp-compute-persistent-disk-csi-driver
+ owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/master/OWNERS
+ - name: gcp-filestore-csi-driver
+ owners:
+ - https://raw.githubusercontent.com/kubernetes-sigs/gcp-filestore-csi-driver/master/OWNERS
- name: IBMCloud
dir: sig-ibmcloud
mission_statement: >
@@ -1052,7 +1051,7 @@ sigs:
recordings_url: https://www.youtube.com/watch?v=iWKC3FsNHWg&list=PL69nYSiGNLP0HqgyqTby6HlDEz7i1mb0-
- description: Federation v2 Working Group
day: Wednesday
- time: "9:30"
+ time: "7:30"
tz: "PT (Pacific Time)"
frequency: weekly
url: https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit
@@ -1500,9 +1499,10 @@ sigs:
- name: Service Catalog
dir: sig-service-catalog
mission_statement: >
- To develop a Kubernetes API for the CNCF service broker and Kubernetes
- broker implementation.
- charter_link:
+ Service Catalog is a Kubernetes extension project that
+ implements the [Open Service Broker API](https://www.openservicebrokerapi.org/) (OSBAPI).
+ It allows application developers the ability to provision and consume cloud services natively from within Kubernetes.
+ charter_link: https://github.com/kubernetes/community/blob/master/sig-service-catalog/charter.md
label: service-catalog
leadership:
chairs:
@@ -1663,7 +1663,7 @@ sigs:
- name: testing-commons
description: "The Testing Commons is a subproject within the Kubernetes sig-testing community interested code structure, layout, and execution of common test code used throughout the kubernetes project."
owners:
- - https://raw.githubusercontent.com/kubernetes-sig-testing/frameworks/master/OWNERS
+ - https://raw.githubusercontent.com/kubernetes-sigs/testing_frameworks/master/OWNERS
- https://raw.githubusercontent.com/kubernetes/kubernetes/master/test/OWNERS
meetings:
- description: Testing Commons
@@ -1732,7 +1732,7 @@ sigs:
recordings_url: https://www.youtube.com/playlist?list=PLutJyDdkKQIqKv-Zq8WbyibQtemChor9y
- description: Cloud Provider vSphere weekly syncup
day: Wednesday
- time: "21:00"
+ time: "16:00"
tz: "UTC"
frequency: bi-weekly
url: https://docs.google.com/document/d/1FQx0BPlkkl1Bn0c9ocVBxYIKojpmrS1CFP5h0DI68AE/edit
@@ -1759,9 +1759,9 @@ sigs:
- name: sig-vmware-test-failures
description: Test Failures and Triage
subprojects:
- - name: cloud-provider-vsphere
+ - name: cluster-api-provider-vsphere
owners:
- - https://raw.githubusercontent.com/kubernetes/cloud-provider-vsphere/master/OWNERS
+ - https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-provider-vsphere/master/OWNERS
- name: Windows
dir: sig-windows
mission_statement: >