From 4fe2e82d8850db42a4e398a66bb1dc511795710a Mon Sep 17 00:00:00 2001 From: Bowei Du Date: Fri, 17 Feb 2017 14:44:03 -0800 Subject: [PATCH] Configure DNS based on the kube-system:kube-dns ConfigMap Updates the dnsmasq cache/mux layer to be managed by dnsmasq-nanny. dnsmasq-nanny manages dnsmasq based on values from the kube-system:kube-dns configmap: "stubDomains": { "acme.local": ["1.2.3.4"] }, is a map of domain to list of nameservers for the domain. This is used to inject private DNS domains into the kube-dns namespace. In the above example, any DNS requests for *.acme.local will be served by the nameserver 1.2.3.4. "upstreamNameservers": ["8.8.8.8", "8.8.4.4"] is a list of upstreamNameservers to use, overriding the configuration specified in /etc/resolv.conf. --- .../addons/dns/kubedns-controller.yaml.base | 19 +- cluster/addons/dns/kubedns-controller.yaml.in | 19 +- .../addons/dns/kubedns-controller.yaml.sed | 19 +- cmd/kubeadm/app/phases/addons/manifests.go | 17 +- test/e2e/BUILD | 1 + test/e2e/dns_common.go | 281 ++++++++++++++++++ test/e2e/dns_configmap.go | 259 +++++----------- 7 files changed, 407 insertions(+), 208 deletions(-) create mode 100644 test/e2e/dns_common.go diff --git a/cluster/addons/dns/kubedns-controller.yaml.base b/cluster/addons/dns/kubedns-controller.yaml.base index 23e5b584e63f..5ea557335f0e 100644 --- a/cluster/addons/dns/kubedns-controller.yaml.base +++ b/cluster/addons/dns/kubedns-controller.yaml.base @@ -53,7 +53,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -105,7 +105,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.13.0 + image: 
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -116,11 +116,17 @@ spec: successThreshold: 1 failureThreshold: 5 args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k - --cache-size=1000 + - --log-facility=- - --server=/__PILLAR__DNS__DOMAIN__/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 - - --log-facility=- ports: - containerPort: 53 name: dns @@ -132,9 +138,12 @@ spec: resources: requests: cpu: 150m - memory: 10Mi + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kubedns-controller.yaml.in b/cluster/addons/dns/kubedns-controller.yaml.in index bcf1878bba61..956a0505286d 100644 --- a/cluster/addons/dns/kubedns-controller.yaml.in +++ b/cluster/addons/dns/kubedns-controller.yaml.in @@ -53,7 +53,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -105,7 +105,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -116,11 +116,17 @@ spec: successThreshold: 1 failureThreshold: 5 args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k - --cache-size=1000 + - --log-facility=- - --server=/{{ pillar['dns_domain'] 
}}/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 - - --log-facility=- ports: - containerPort: 53 name: dns @@ -132,9 +138,12 @@ spec: resources: requests: cpu: 150m - memory: 10Mi + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 livenessProbe: httpGet: path: /metrics diff --git a/cluster/addons/dns/kubedns-controller.yaml.sed b/cluster/addons/dns/kubedns-controller.yaml.sed index 2a5928a45d9f..a6d81fd33f1f 100644 --- a/cluster/addons/dns/kubedns-controller.yaml.sed +++ b/cluster/addons/dns/kubedns-controller.yaml.sed @@ -53,7 +53,7 @@ spec: optional: true containers: - name: kubedns - image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1 resources: # TODO: Set memory limits when we've profiled the container for large # clusters, then set request = limit to keep this container in @@ -104,7 +104,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1 livenessProbe: httpGet: path: /healthcheck/dnsmasq @@ -115,11 +115,17 @@ spec: successThreshold: 1 failureThreshold: 5 args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k - --cache-size=1000 + - --log-facility=- - --server=/$DNS_DOMAIN/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 - - --log-facility=- ports: - containerPort: 53 name: dns @@ -131,9 +137,12 @@ spec: resources: requests: cpu: 150m - memory: 10Mi + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar - image: 
gcr.io/google_containers/k8s-dns-sidecar-amd64:1.13.0 + image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1 livenessProbe: httpGet: path: /metrics diff --git a/cmd/kubeadm/app/phases/addons/manifests.go b/cmd/kubeadm/app/phases/addons/manifests.go index 2cee80fb807d..a2f5d01a985b 100644 --- a/cmd/kubeadm/app/phases/addons/manifests.go +++ b/cmd/kubeadm/app/phases/addons/manifests.go @@ -89,7 +89,7 @@ spec: name: kube-proxy ` - KubeDNSVersion = "1.13.0" + KubeDNSVersion = "1.14.1" KubeDNSDeployment = ` @@ -179,7 +179,7 @@ spec: - name: kube-dns-config mountPath: /kube-dns-config - name: dnsmasq - image: {{ .ImageRepository }}/k8s-dns-dnsmasq-{{ .Arch }}:{{ .Version }} + image: {{ .ImageRepository }}/k8s-dns-dnsmasq-nanny-{{ .Arch }}:{{ .Version }} imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -191,11 +191,17 @@ spec: successThreshold: 1 failureThreshold: 5 args: + - -v=2 + - -logtostderr + - -configDir=/etc/k8s/dns/dnsmasq-nanny + - -restartDnsmasq=true + - -- + - -k - --cache-size=1000 + - --log-facility=- - --server=/{{ .DNSDomain }}/127.0.0.1#10053 - --server=/in-addr.arpa/127.0.0.1#10053 - --server=/ip6.arpa/127.0.0.1#10053 - - --log-facility=- ports: - containerPort: 53 name: dns @@ -207,7 +213,10 @@ spec: resources: requests: cpu: 150m - memory: 10Mi + memory: 20Mi + volumeMounts: + - name: kube-dns-config + mountPath: /etc/k8s/dns/dnsmasq-nanny - name: sidecar image: {{ .ImageRepository }}/k8s-dns-sidecar-{{ .Arch }}:{{ .Version }} imagePullPolicy: IfNotPresent diff --git a/test/e2e/BUILD b/test/e2e/BUILD index e619ccbc8167..c49b849d336e 100644 --- a/test/e2e/BUILD +++ b/test/e2e/BUILD @@ -31,6 +31,7 @@ go_library( "disruption.go", "dns.go", "dns_autoscaling.go", + "dns_common.go", "dns_configmap.go", "e2e.go", "empty.go", diff --git a/test/e2e/dns_common.go b/test/e2e/dns_common.go new file mode 100644 index 000000000000..fdf95821ff28 --- /dev/null +++ b/test/e2e/dns_common.go @@ -0,0 +1,281 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "fmt" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" + "k8s.io/kubernetes/test/e2e/framework" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +type dnsTestCommon struct { + f *framework.Framework + c clientset.Interface + ns string + name string + labels []string + + dnsPod *v1.Pod + utilPod *v1.Pod + utilService *v1.Service + dnsServerPod *v1.Pod + + cm *v1.ConfigMap +} + +func newDnsTestCommon() dnsTestCommon { + return dnsTestCommon{ + f: framework.NewDefaultFramework("dns-config-map"), + ns: "kube-system", + name: "kube-dns", + } +} + +func (t *dnsTestCommon) init() { + By("Finding a DNS pod") + label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) + options := metav1.ListOptions{LabelSelector: label.String()} + + pods, err := t.f.ClientSet.Core().Pods("kube-system").List(options) + Expect(err).NotTo(HaveOccurred()) + Expect(len(pods.Items)).Should(BeNumerically(">=", 1)) + + t.dnsPod = &pods.Items[0] + framework.Logf("Using DNS pod: %v", t.dnsPod.Name) +} + +func (t *dnsTestCommon) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) { + 
t.checkDNSRecordFrom(name, predicate, "kube-dns", timeout) +} + +func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string) bool, target string, timeout time.Duration) { + var actual []string + + err := wait.PollImmediate( + time.Duration(1)*time.Second, + timeout, + func() (bool, error) { + actual = t.runDig(name, target) + if predicate(actual) { + return true, nil + } + return false, nil + }) + + if err != nil { + framework.Failf("dig result did not match: %#v after %v", + actual, timeout) + } +} + +// runDig queries for `dnsName`. Returns a list of responses. +func (t *dnsTestCommon) runDig(dnsName, target string) []string { + cmd := []string{"/usr/bin/dig", "+short"} + switch target { + case "kube-dns": + cmd = append(cmd, "@"+t.dnsPod.Status.PodIP, "-p", "10053") + case "dnsmasq": + break + default: + panic(fmt.Errorf("invalid target: " + target)) + } + cmd = append(cmd, dnsName) + + stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{ + Command: cmd, + Namespace: t.f.Namespace.Name, + PodName: t.utilPod.Name, + ContainerName: "util", + CaptureStdout: true, + CaptureStderr: true, + }) + + framework.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v", + cmd, stdout, stderr, err) + + if stdout == "" { + return []string{} + } else { + return strings.Split(stdout, "\n") + } +} + +func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { + if t.cm != nil { + t.cm = cm + } + + cm.ObjectMeta.Namespace = t.ns + cm.ObjectMeta.Name = t.name + + options := metav1.ListOptions{ + FieldSelector: fields.Set{ + "metadata.namespace": t.ns, + "metadata.name": t.name, + }.AsSelector().String(), + } + cmList, err := t.c.Core().ConfigMaps(t.ns).List(options) + Expect(err).NotTo(HaveOccurred()) + + if len(cmList.Items) == 0 { + By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) + _, err := t.c.Core().ConfigMaps(t.ns).Create(cm) + Expect(err).NotTo(HaveOccurred()) + } else { + By(fmt.Sprintf("Updating the ConfigMap (%s:%s) 
to %+v", t.ns, t.name, *cm)) + _, err := t.c.Core().ConfigMaps(t.ns).Update(cm) + Expect(err).NotTo(HaveOccurred()) + } +} + +func (t *dnsTestCommon) deleteConfigMap() { + By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) + t.cm = nil + err := t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil) + Expect(err).NotTo(HaveOccurred()) +} + +func (t *dnsTestCommon) createUtilPod() { + // Actual port # doesn't matter, just needs to exist. + const servicePort = 10101 + + t.utilPod = &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: t.f.Namespace.Name, + Labels: map[string]string{"app": "e2e-dns-configmap"}, + GenerateName: "e2e-dns-configmap-", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "util", + Image: "gcr.io/google_containers/dnsutils:e2e", + Command: []string{"sleep", "10000"}, + Ports: []v1.ContainerPort{ + {ContainerPort: servicePort, Protocol: "TCP"}, + }, + }, + }, + }, + } + + var err error + t.utilPod, err = t.c.Core().Pods(t.f.Namespace.Name).Create(t.utilPod) + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Created pod %v", t.utilPod) + Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred()) + + t.utilService = &v1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: t.f.Namespace.Name, + Name: "e2e-dns-configmap", + }, + Spec: v1.ServiceSpec{ + Selector: map[string]string{"app": "e2e-dns-configmap"}, + Ports: []v1.ServicePort{ + { + Protocol: "TCP", + Port: servicePort, + TargetPort: intstr.FromInt(servicePort), + }, + }, + }, + } + + t.utilService, err = t.c.Core().Services(t.f.Namespace.Name).Create(t.utilService) + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Created service %v", t.utilService) +} + +func (t *dnsTestCommon) deleteUtilPod() { + podClient := t.c.Core().Pods(t.f.Namespace.Name) + if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil { + 
framework.Logf("Delete of pod %v:%v failed: %v", + t.utilPod.Namespace, t.utilPod.Name, err) + } +} + +func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) { + t.dnsServerPod = &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: t.f.Namespace.Name, + GenerateName: "e2e-dns-configmap-dns-server-", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "dns", + Image: "gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.13.0", + Command: []string{ + "/usr/sbin/dnsmasq", + "-u", "root", + "-k", + "--log-facility", "-", + "-q", + }, + }, + }, + DNSPolicy: "Default", + }, + } + + for name, ip := range aRecords { + t.dnsServerPod.Spec.Containers[0].Command = append( + t.dnsServerPod.Spec.Containers[0].Command, + fmt.Sprintf("-A/%v/%v", name, ip)) + } + + var err error + t.dnsServerPod, err = t.c.Core().Pods(t.f.Namespace.Name).Create(t.dnsServerPod) + Expect(err).NotTo(HaveOccurred()) + framework.Logf("Created pod %v", t.dnsServerPod) + Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred()) + + t.dnsServerPod, err = t.c.Core().Pods(t.f.Namespace.Name).Get( + t.dnsServerPod.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) +} + +func (t *dnsTestCommon) deleteDNSServerPod() { + podClient := t.c.Core().Pods(t.f.Namespace.Name) + if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { + framework.Logf("Delete of pod %v:%v failed: %v", + t.utilPod.Namespace, t.dnsServerPod.Name, err) + } +} diff --git a/test/e2e/dns_configmap.go b/test/e2e/dns_configmap.go index 6e31c0790458..f5cb52550e24 100644 --- a/test/e2e/dns_configmap.go +++ b/test/e2e/dns_configmap.go @@ -18,68 +18,32 @@ package e2e import ( "fmt" - "strings" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" 
 "k8s.io/kubernetes/pkg/api/v1" - "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" ) -type dnsConfigMapTest struct { - f *framework.Framework - c clientset.Interface - ns string - name string - labels []string +type dnsFederationsConfigMapTest struct { + dnsTestCommon - cm *v1.ConfigMap fedMap map[string]string isValid bool - - dnsPod *v1.Pod - utilPod *v1.Pod - utilService *v1.Service } -var _ = framework.KubeDescribe("DNS config map", func() { - test := &dnsConfigMapTest{ - f: framework.NewDefaultFramework("dns-config-map"), - ns: "kube-system", - name: "kube-dns", - } +var _ = framework.KubeDescribe("DNS configMap federations", func() { + t := &dnsFederationsConfigMapTest{dnsTestCommon: newDnsTestCommon()} + BeforeEach(func() { t.c = t.f.ClientSet }) - BeforeEach(func() { - test.c = test.f.ClientSet - }) - - It("should be able to change configuration", func() { - test.run + It("should be able to change federation configuration", func() { + t.run() }) }) -func (t *dnsConfigMapTest) init() { - By("Finding a DNS pod") - label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) - options := metav1.ListOptions{LabelSelector: label.String()} - - pods, err := t.f.ClientSet.Core().Pods("kube-system").List(options) - Expect(err).NotTo(HaveOccurred()) - Expect(len(pods.Items)).Should(BeNumerically(">=", 1)) - - t.dnsPod = &pods.Items[0] - framework.Logf("Using DNS pod: %v", t.dnsPod.Name) -} - -func (t *dnsConfigMapTest) run() { +func (t *dnsFederationsConfigMapTest) run() { t.init() defer t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil) @@ -120,11 +84,7 @@ func (t *dnsConfigMapTest) run() { t.validate() } -func (t *dnsConfigMapTest) validate() { - t.validateFederation() -} - -func (t *dnsConfigMapTest) validateFederation() { +func (t *dnsFederationsConfigMapTest) validate() { federations := t.fedMap if len(federations) == 0 { @@ -161,155 +121,76 @@ 
func (t *dnsConfigMapTest) validateFederation() { } } -func (t *dnsConfigMapTest) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) { - var actual []string - - err := wait.PollImmediate( - time.Duration(1)*time.Second, - timeout, - func() (bool, error) { - actual = t.runDig(name) - if predicate(actual) { - return true, nil - } - return false, nil - }) - - if err != nil { - framework.Logf("dig result did not match: %#v after %v", - actual, timeout) - } -} - -// runDig querying for `dnsName`. Returns a list of responses. -func (t *dnsConfigMapTest) runDig(dnsName string) []string { - cmd := []string{ - "/usr/bin/dig", - "+short", - "@" + t.dnsPod.Status.PodIP, - "-p", "10053", dnsName, - } - stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{ - Command: cmd, - Namespace: t.f.Namespace.Name, - PodName: t.utilPod.Name, - ContainerName: "util", - CaptureStdout: true, - CaptureStderr: true, - }) - - framework.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v", - cmd, stdout, stderr, err) - - if stdout == "" { - return []string{} - } else { - return strings.Split(stdout, "\n") - } -} - -func (t *dnsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]string, isValid bool) { +func (t *dnsFederationsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]string, isValid bool) { if isValid { - t.cm = cm t.fedMap = fedMap } t.isValid = isValid - - cm.ObjectMeta.Namespace = t.ns - cm.ObjectMeta.Name = t.name - - options := metav1.ListOptions{ - FieldSelector: fields.Set{ - "metadata.namespace": t.ns, - "metadata.name": t.name, - }.AsSelector().String(), - } - cmList, err := t.c.Core().ConfigMaps(t.ns).List(options) - Expect(err).NotTo(HaveOccurred()) - - if len(cmList.Items) == 0 { - By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) - _, err := t.c.Core().ConfigMaps(t.ns).Create(cm) - Expect(err).NotTo(HaveOccurred()) - } else { - By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", 
t.ns, t.name, *cm)) - _, err := t.c.Core().ConfigMaps(t.ns).Update(cm) - Expect(err).NotTo(HaveOccurred()) - } + t.dnsTestCommon.setConfigMap(cm) } -func (t *dnsConfigMapTest) deleteConfigMap() { - By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) - - t.cm = nil +func (t *dnsFederationsConfigMapTest) deleteConfigMap() { t.isValid = false - - err := t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil) - Expect(err).NotTo(HaveOccurred()) + t.dnsTestCommon.deleteConfigMap() } -func (t *dnsConfigMapTest) createUtilPod() { - // Actual port # doesn't matter, just need to exist. - const servicePort = 10101 - - t.utilPod = &v1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: t.f.Namespace.Name, - Labels: map[string]string{"app": "e2e-dns-configmap"}, - GenerateName: "e2e-dns-configmap-", - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "util", - Image: "gcr.io/google_containers/dnsutils:e2e", - Command: []string{"sleep", "10000"}, - Ports: []v1.ContainerPort{ - {ContainerPort: servicePort, Protocol: "TCP"}, - }, - }, - }, - }, - } - - var err error - t.utilPod, err = t.c.Core().Pods(t.f.Namespace.Name).Create(t.utilPod) - Expect(err).NotTo(HaveOccurred()) - framework.Logf("Created pod %v", t.utilPod) - Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred()) - - t.utilService = &v1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: t.f.Namespace.Name, - Name: "e2e-dns-configmap", - }, - Spec: v1.ServiceSpec{ - Selector: map[string]string{"app": "e2e-dns-configmap"}, - Ports: []v1.ServicePort{ - { - Protocol: "TCP", - Port: servicePort, - TargetPort: intstr.FromInt(servicePort), - }, - }, - }, - } - - t.utilService, err = t.c.Core().Services(t.f.Namespace.Name).Create(t.utilService) - Expect(err).NotTo(HaveOccurred()) - framework.Logf("Created service %v", t.utilService) +type dnsNameserverTest struct { + dnsTestCommon } -func (t 
*dnsConfigMapTest) deleteUtilPod() { - podClient := t.c.Core().Pods(t.f.Namespace.Name) - if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Logf("Delete of pod %v:%v failed: %v", - t.utilPod.Namespace, t.utilPod.Name, err) - } +func (t *dnsNameserverTest) run() { + t.init() + + t.createUtilPod() + defer t.deleteUtilPod() + + t.createDNSServer(map[string]string{ + "abc.acme.local": "1.1.1.1", + "def.acme.local": "2.2.2.2", + "widget.local": "3.3.3.3", + }) + defer t.deleteDNSServerPod() + + t.setConfigMap(&v1.ConfigMap{Data: map[string]string{ + "stubDomains": fmt.Sprintf(`{"acme.local":["%v"]}`, t.dnsServerPod.Status.PodIP), + "upstreamNameservers": fmt.Sprintf(`["%v"]`, t.dnsServerPod.Status.PodIP), + }}) + + // The ConfigMap update mechanism takes longer than the standard + // wait.ForeverTestTimeout. + moreForeverTestTimeout := 2 * 60 * time.Second + + t.checkDNSRecordFrom( + "abc.acme.local", + func(actual []string) bool { return len(actual) == 1 && actual[0] == "1.1.1.1" }, + "dnsmasq", + moreForeverTestTimeout) + t.checkDNSRecordFrom( + "def.acme.local", + func(actual []string) bool { return len(actual) == 1 && actual[0] == "2.2.2.2" }, + "dnsmasq", + wait.ForeverTestTimeout) + t.checkDNSRecordFrom( + "widget.local", + func(actual []string) bool { return len(actual) == 1 && actual[0] == "3.3.3.3" }, + "dnsmasq", + wait.ForeverTestTimeout) + + t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil) + // Wait for the deleted ConfigMap to take effect, otherwise the + // configuration can bleed into other tests. + t.checkDNSRecordFrom( + "abc.acme.local", + func(actual []string) bool { return len(actual) == 0 }, + "dnsmasq", + moreForeverTestTimeout) } + +var _ = framework.KubeDescribe("DNS configMap nameserver", func() { + t := &dnsNameserverTest{dnsTestCommon: newDnsTestCommon()} + BeforeEach(func() { t.c = t.f.ClientSet }) + + It("should be able to change stubDomain configuration", func() { + t.run() + }) +})