/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"
	"math"
	"net"
	goruntime "runtime"
	"sort"
	"strings"
	"time"

	"github.com/golang/glog"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/types"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/kubernetes/pkg/api/v1"
	v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/pkg/kubelet/util"
	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
	"k8s.io/kubernetes/pkg/version"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
	// maxImagesInNodeStatus is the maximum number of images we store in
	// the node status.
	maxImagesInNodeStatus = 50

	// maxNamesPerImageInNodeStatus is the maximum number of names per image
	// stored in the node status.
	maxNamesPerImageInNodeStatus = 5
)

// registerWithApiServer registers the node with the cluster master. It is safe
// to call multiple times, but not concurrently (kl.registrationCompleted is
// not locked).
func (kl *Kubelet) registerWithApiServer() {
	if kl.registrationCompleted {
		return
	}
	step := 100 * time.Millisecond

	for {
		time.Sleep(step)
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}

		node, err := kl.initialNode()
		if err != nil {
			glog.Errorf("Unable to construct v1.Node object for kubelet: %v", err)
			continue
		}

		glog.Infof("Attempting to register node %s", node.Name)
		registered := kl.tryRegisterWithApiServer(node)
		if registered {
			glog.Infof("Successfully registered node %s", node.Name)
			kl.registrationCompleted = true
			return
		}
	}
}

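// exampleRegistrationBackoff is an illustrative sketch (not used by the
// kubelet itself) reproducing the retry cadence of registerWithApiServer
// above: sleeps of 100ms, 200ms, 400ms, 800ms, 1.6s, 3.2s, 6.4s, then 7s for
// every subsequent attempt.
func exampleRegistrationBackoff(attempts int) []time.Duration {
	sleeps := make([]time.Duration, 0, attempts)
	step := 100 * time.Millisecond
	for i := 0; i < attempts; i++ {
		sleeps = append(sleeps, step)
		// Double the step after each attempt, capped at seven seconds,
		// mirroring the loop above.
		step = step * 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}
	}
	return sleeps
}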
					
						
// tryRegisterWithApiServer makes an attempt to register the given node with
// the API server, returning a boolean indicating whether the attempt was
// successful.  If a node with the same name already exists, it reconciles the
// value of the annotation for controller-managed attach-detach of attachable
// persistent volumes for the node.  If a node of the same name exists but has
// a different externalID value, it attempts to delete that node so that a
// later attempt can recreate it.
func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
	_, err := kl.kubeClient.Core().Nodes().Create(node)
	if err == nil {
		return true
	}

	if !apierrors.IsAlreadyExists(err) {
		glog.Errorf("Unable to register node %q with API server: %v", kl.nodeName, err)
		return false
	}

	existingNode, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), metav1.GetOptions{})
	if err != nil {
		glog.Errorf("Unable to register node %q with API server: error getting existing node: %v", kl.nodeName, err)
		return false
	}
	if existingNode == nil {
		glog.Errorf("Unable to register node %q with API server: no node instance returned", kl.nodeName)
		return false
	}

	clonedNode, err := conversion.NewCloner().DeepCopy(existingNode)
	if err != nil {
		glog.Errorf("Unable to clone %q node object %#v: %v", kl.nodeName, existingNode, err)
		return false
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		glog.Errorf("Unable to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
		return false
	}

	if existingNode.Spec.ExternalID == node.Spec.ExternalID {
		glog.Infof("Node %s was previously registered", kl.nodeName)

		// Edge case: the node was previously registered; reconcile
		// the value of the controller-managed attach-detach
		// annotation.
		requiresUpdate := kl.reconcileCMADAnnotationWithExistingNode(node, existingNode)
		if requiresUpdate {
			if _, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName),
				originalNode, existingNode); err != nil {
				glog.Errorf("Unable to reconcile node %q with API server: error updating node: %v", kl.nodeName, err)
				return false
			}
		}

		return true
	}

	glog.Errorf(
		"Previously node %q had externalID %q; now it is %q; will delete and recreate.",
		kl.nodeName, node.Spec.ExternalID, existingNode.Spec.ExternalID,
	)
	if err := kl.kubeClient.Core().Nodes().Delete(node.Name, nil); err != nil {
		glog.Errorf("Unable to register node %q with API server: error deleting old node: %v", kl.nodeName, err)
	} else {
		glog.Infof("Deleted old node object %q", kl.nodeName)
	}

	return false
}

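// Taken together, a single registration attempt resolves to one of three
// outcomes:
//
//	Create succeeds                   -> registered, return true
//	Node exists, same ExternalID      -> reconcile CMAD annotation, return true
//	Node exists, different ExternalID -> delete the stale node, return false
//	                                     (the caller's retry loop recreates it)
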
// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
	var (
		existingCMAAnnotation    = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]
		newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation]
	)

	if newCMAAnnotation == existingCMAAnnotation {
		return false
	}

	// If the just-constructed node and the existing node do
	// not have the same value, update the existing node with
	// the correct value of the annotation.
	if !newSet {
		glog.Info("Controller attach-detach setting changed to false; updating existing Node")
		delete(existingNode.Annotations, volumehelper.ControllerManagedAttachAnnotation)
	} else {
		glog.Info("Controller attach-detach setting changed to true; updating existing Node")
		if existingNode.Annotations == nil {
			existingNode.Annotations = make(map[string]string)
		}
		existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation] = newCMAAnnotation
	}

	return true
}

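// For illustration, a node registered with controller-managed attach-detach
// enabled carries an annotation of the form (key per
// volumehelper.ControllerManagedAttachAnnotation):
//
//	metadata:
//	  annotations:
//	    volumes.kubernetes.io/controller-managed-attach-detach: "true"
//
// The reconcile above adds or removes that annotation on the existing node so
// it matches the freshly constructed one.
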
// initialNode constructs the initial v1.Node for this Kubelet, incorporating node
// labels, information from the cloud provider, and Kubelet configuration.
func (kl *Kubelet) initialNode() (*v1.Node, error) {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(kl.nodeName),
			Labels: map[string]string{
				metav1.LabelHostname: kl.hostname,
				metav1.LabelOS:       goruntime.GOOS,
				metav1.LabelArch:     goruntime.GOARCH,
			},
		},
		Spec: v1.NodeSpec{
			Unschedulable: !kl.registerSchedulable,
		},
	}
	nodeTaints := make([]v1.Taint, 0)
	if len(kl.kubeletConfiguration.RegisterWithTaints) > 0 {
		taints := make([]v1.Taint, len(kl.kubeletConfiguration.RegisterWithTaints))
		for i := range kl.kubeletConfiguration.RegisterWithTaints {
			if err := v1.Convert_api_Taint_To_v1_Taint(&kl.kubeletConfiguration.RegisterWithTaints[i], &taints[i], nil); err != nil {
				return nil, err
			}
		}
		nodeTaints = append(nodeTaints, taints...)
	}
	if kl.externalCloudProvider {
		taint := v1.Taint{
			Key:    metav1.TaintExternalCloudProvider,
			Value:  "true",
			Effect: v1.TaintEffectNoSchedule,
		}

		nodeTaints = append(nodeTaints, taint)
	}
	if len(nodeTaints) > 0 {
		node.Spec.Taints = nodeTaints
	}
	// Initially, set NodeNetworkUnavailable to true.
	if kl.providerRequiresNetworkingConfiguration() {
		node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
			Type:               v1.NodeNetworkUnavailable,
			Status:             v1.ConditionTrue,
			Reason:             "NoRouteCreated",
			Message:            "Node created without a route",
			LastTransitionTime: metav1.NewTime(kl.clock.Now()),
		})
	}

	if kl.enableControllerAttachDetach {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}

		glog.Infof("Setting node annotation to enable volume controller attach/detach")
		node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
	} else {
		glog.Infof("Controller attach/detach is disabled for this node; Kubelet will attach and detach volumes")
	}

	if kl.kubeletConfiguration.KeepTerminatedPodVolumes {
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		glog.Infof("Setting node annotation to keep pod volumes of terminated pods attached to the node")
		node.Annotations[volumehelper.KeepTerminatedPodVolumesAnnotation] = "true"
	}

	// @question: should this be placed after the call to the cloud provider, which also applies labels?
	for k, v := range kl.nodeLabels {
		if cv, found := node.ObjectMeta.Labels[k]; found {
			glog.Warningf("the node label %s=%s will overwrite default setting %s", k, v, cv)
		}
		node.ObjectMeta.Labels[k] = v
	}

	if kl.providerID != "" {
		node.Spec.ProviderID = kl.providerID
	}

	if kl.cloud != nil {
		instances, ok := kl.cloud.Instances()
		if !ok {
			return nil, fmt.Errorf("failed to get instances from cloud provider")
		}

		// TODO(roberthbailey): Can we do this without having credentials to talk
		// to the cloud provider?
		// TODO: ExternalID is deprecated, we'll have to drop this code
		externalID, err := instances.ExternalID(kl.nodeName)
		if err != nil {
			return nil, fmt.Errorf("failed to get external ID from cloud provider: %v", err)
		}
		node.Spec.ExternalID = externalID

		// TODO: We can't assume that the node has credentials to talk to the
		// cloudprovider from arbitrary nodes. At most, we should talk to a
		// local metadata server here.
		if node.Spec.ProviderID == "" {
			node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(kl.cloud, kl.nodeName)
			if err != nil {
				return nil, err
			}
		}

		instanceType, err := instances.InstanceType(kl.nodeName)
		if err != nil {
			return nil, err
		}
		if instanceType != "" {
			glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelInstanceType, instanceType)
			node.ObjectMeta.Labels[metav1.LabelInstanceType] = instanceType
		}
		// If the cloud has zone information, label the node with the zone information
		zones, ok := kl.cloud.Zones()
		if ok {
			zone, err := zones.GetZone()
			if err != nil {
				return nil, fmt.Errorf("failed to get zone from cloud provider: %v", err)
			}
			if zone.FailureDomain != "" {
				glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneFailureDomain, zone.FailureDomain)
				node.ObjectMeta.Labels[metav1.LabelZoneFailureDomain] = zone.FailureDomain
			}
			if zone.Region != "" {
				glog.Infof("Adding node label from cloud provider: %s=%s", metav1.LabelZoneRegion, zone.Region)
				node.ObjectMeta.Labels[metav1.LabelZoneRegion] = zone.Region
			}
		}
	} else {
		node.Spec.ExternalID = kl.hostname
		if kl.autoDetectCloudProvider {
			// If no cloud provider is defined - use the one detected by cadvisor
			info, err := kl.GetCachedMachineInfo()
			if err == nil {
				kl.updateCloudProviderFromMachineInfo(node, info)
			}
		}
	}
	kl.setNodeStatus(node)

	return node, nil
}

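// For a node named "node-1" on linux/amd64 with registerSchedulable=true and
// no taints, the object constructed above starts out roughly as (names and
// values hypothetical):
//
//	metadata:
//	  name: node-1
//	  labels:
//	    kubernetes.io/hostname: node-1
//	    beta.kubernetes.io/os: linux
//	    beta.kubernetes.io/arch: amd64
//	spec:
//	  unschedulable: false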
					
						
// syncNodeStatus should be called periodically from a goroutine.
// It synchronizes node status to master, registering the kubelet first if
// necessary.
func (kl *Kubelet) syncNodeStatus() {
	if kl.kubeClient == nil {
		return
	}
	if kl.registerNode {
		// This will exit immediately if it doesn't need to do anything.
		kl.registerWithApiServer()
	}
	if err := kl.updateNodeStatus(); err != nil {
		glog.Errorf("Unable to update node status: %v", err)
	}
}

					
						
// updateNodeStatus updates node status to master with retries.
func (kl *Kubelet) updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := kl.tryUpdateNodeStatus(i); err != nil {
			glog.Errorf("Error updating node status, will retry: %v", err)
		} else {
			return nil
		}
	}
	return fmt.Errorf("update node status exceeds retry count")
}

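// nodeStatusUpdateRetry is defined elsewhere in this package, so each sync
// issues at most that many GET+PATCH round trips before surfacing an error to
// syncNodeStatus. The retry index is forwarded to tryUpdateNodeStatus so that
// only the first attempt may be served from the apiserver cache.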
					
						
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error {
	// In large clusters, GET and PUT operations on Node objects coming
	// from here are the majority of load on apiserver and etcd.
	// To reduce the load on etcd, we are serving GET operations from
	// apiserver cache (the data might be slightly delayed but it doesn't
	// seem to cause more conflict - the delays are pretty small).
	// If it results in a conflict, all retries are served directly from etcd.
	opts := metav1.GetOptions{}
	if tryNumber == 0 {
		util.FromApiserverCache(&opts)
	}
	node, err := kl.kubeClient.Core().Nodes().Get(string(kl.nodeName), opts)
	if err != nil {
		return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
	}

	clonedNode, err := conversion.NewCloner().DeepCopy(node)
	if err != nil {
		return fmt.Errorf("error cloning node %q: %v", kl.nodeName, err)
	}

	originalNode, ok := clonedNode.(*v1.Node)
	if !ok || originalNode == nil {
		return fmt.Errorf("failed to cast %q node object %#v to v1.Node", kl.nodeName, clonedNode)
	}

	kl.updatePodCIDR(node.Spec.PodCIDR)

	kl.setNodeStatus(node)
	// Patch the current status on the API server
	updatedNode, err := nodeutil.PatchNodeStatus(kl.kubeClient, types.NodeName(kl.nodeName), originalNode, node)
	if err != nil {
		return err
	}
	// If update finishes successfully, mark the volumeInUse as reportedInUse to indicate
	// those volumes are already updated in the node's status
	kl.volumeManager.MarkVolumesAsReportedInUse(updatedNode.Status.VolumesInUse)
	return nil
}

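// Sketch of the read path used above, assuming util.FromApiserverCache sets
// ResourceVersion="0" on the GetOptions (which lets the apiserver answer from
// its watch cache instead of performing a quorum read):
//
//	try 0:    GET nodes/<name>?resourceVersion=0  -> served from apiserver cache
//	try 1..n: GET nodes/<name>                    -> served via etcd quorum read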
					
						
// recordNodeStatusEvent records an event of the given type with the given
// message for the node.
func (kl *Kubelet) recordNodeStatusEvent(eventType, event string) {
	glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
	// TODO: This requires a transaction, either both node status is updated
	// and event is recorded or neither should happen, see issue #6055.
	kl.recorder.Eventf(kl.nodeRef, eventType, event, "Node %s status is now: %s", kl.nodeName, event)
}

// Set IP and hostname addresses for the node.
func (kl *Kubelet) setNodeAddress(node *v1.Node) error {
	if kl.nodeIP != nil {
		if err := kl.validateNodeIP(); err != nil {
			return fmt.Errorf("failed to validate nodeIP: %v", err)
		}
		glog.V(2).Infof("Using node IP: %q", kl.nodeIP.String())
	}

	if kl.cloud != nil {
		instances, ok := kl.cloud.Instances()
		if !ok {
			return fmt.Errorf("failed to get instances from cloud provider")
		}
		// TODO(roberthbailey): Can we do this without having credentials to talk
		// to the cloud provider?
		// TODO(justinsb): We can if CurrentNodeName() was actually CurrentNode() and returned an interface
		// TODO: If IP addresses couldn't be fetched from the cloud provider, should kubelet fallback on the other methods for getting the IP below?
		nodeAddresses, err := instances.NodeAddresses(kl.nodeName)
		if err != nil {
			return fmt.Errorf("failed to get node address from cloud provider: %v", err)
		}
		if kl.nodeIP != nil {
			for _, nodeAddress := range nodeAddresses {
				if nodeAddress.Address == kl.nodeIP.String() {
					node.Status.Addresses = []v1.NodeAddress{
						{Type: nodeAddress.Type, Address: nodeAddress.Address},
						{Type: v1.NodeHostName, Address: kl.GetHostname()},
					}
					return nil
				}
			}
			return fmt.Errorf("failed to get node address from cloud provider that matches ip: %v", kl.nodeIP)
		}

		// Only add a NodeHostName address if the cloudprovider did not specify one
		// (we assume the cloudprovider knows best)
		var addressNodeHostName *v1.NodeAddress
		for i := range nodeAddresses {
			if nodeAddresses[i].Type == v1.NodeHostName {
				addressNodeHostName = &nodeAddresses[i]
				break
			}
		}
		if addressNodeHostName == nil {
			hostnameAddress := v1.NodeAddress{Type: v1.NodeHostName, Address: kl.GetHostname()}
			nodeAddresses = append(nodeAddresses, hostnameAddress)
		} else {
			glog.V(2).Infof("Using Node Hostname from cloudprovider: %q", addressNodeHostName.Address)
		}
		node.Status.Addresses = nodeAddresses
	} else {
		var ipAddr net.IP
		var err error

		// 1) Use nodeIP if set
		// 2) If the user has specified an IP to HostnameOverride, use it
		// 3) Lookup the IP from node name by DNS and use the first non-loopback ipv4 address
		// 4) Try to get the IP from the network interface used as default gateway
		if kl.nodeIP != nil {
			ipAddr = kl.nodeIP
			node.ObjectMeta.Annotations[metav1.AnnotationProvidedIPAddr] = kl.nodeIP.String()
		} else if addr := net.ParseIP(kl.hostname); addr != nil {
			ipAddr = addr
		} else {
			var addrs []net.IP
			addrs, err = net.LookupIP(node.Name)
			for _, addr := range addrs {
				if !addr.IsLoopback() && addr.To4() != nil {
					ipAddr = addr
					break
				}
			}

			if ipAddr == nil {
				ipAddr, err = utilnet.ChooseHostInterface()
			}
		}

		if ipAddr == nil {
			// We tried everything we could, but the IP address wasn't fetchable; error out
			return fmt.Errorf("can't get ip address of node %s. error: %v", node.Name, err)
		} else {
			node.Status.Addresses = []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: ipAddr.String()},
				{Type: v1.NodeHostName, Address: kl.GetHostname()},
			}
		}
	}
	return nil
}

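// exampleResolveNodeIP is an illustrative condensation of the fallback order
// implemented in the no-cloud-provider branch above; it is not used by the
// kubelet itself. It returns the first of: the explicitly configured node IP,
// the hostname parsed as an IP, the first non-loopback IPv4 address from DNS,
// or the address of the interface holding the default route.
func exampleResolveNodeIP(nodeIP net.IP, hostname string) (net.IP, error) {
	if nodeIP != nil {
		return nodeIP, nil
	}
	if addr := net.ParseIP(hostname); addr != nil {
		return addr, nil
	}
	if addrs, err := net.LookupIP(hostname); err == nil {
		for _, addr := range addrs {
			if !addr.IsLoopback() && addr.To4() != nil {
				return addr, nil
			}
		}
	}
	// Fall back to the default-gateway interface, as step 4 above does.
	return utilnet.ChooseHostInterface()
}
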
// Set machine info (capacity and allocatable) for the node.
func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
	// Note: avoid blindly overwriting the capacity in case opaque
	//       resources are being advertised.
	if node.Status.Capacity == nil {
		node.Status.Capacity = v1.ResourceList{}
	}

	// populate GPU capacity.
	gpuCapacity := kl.gpuManager.Capacity()
	if gpuCapacity != nil {
		for k, v := range gpuCapacity {
			node.Status.Capacity[k] = v
		}
	}

	// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
	// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
	info, err := kl.GetCachedMachineInfo()
	if err != nil {
		// TODO(roberthbailey): This is required for test-cmd.sh to pass.
		// See if the test should be updated instead.
		node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
		node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi")
		node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI)
		glog.Errorf("Error getting machine info: %v", err)
	} else {
		node.Status.NodeInfo.MachineID = info.MachineID
		node.Status.NodeInfo.SystemUUID = info.SystemUUID

		for rName, rCap := range cadvisor.CapacityFromMachineInfo(info) {
			node.Status.Capacity[rName] = rCap
		}

		if kl.podsPerCore > 0 {
			node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
				int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI)
		} else {
			node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
				int64(kl.maxPods), resource.DecimalSI)
		}
		if node.Status.NodeInfo.BootID != "" &&
			node.Status.NodeInfo.BootID != info.BootID {
			// TODO: This requires a transaction, either both node status is updated
			// and event is recorded or neither should happen, see issue #6055.
			kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.NodeRebooted,
				"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
		}
		node.Status.NodeInfo.BootID = info.BootID
	}

	// Set Allocatable.
	if node.Status.Allocatable == nil {
		node.Status.Allocatable = make(v1.ResourceList)
	}
	// Remove opaque integer resources from allocatable that are no longer
	// present in capacity.
	for k := range node.Status.Allocatable {
		_, found := node.Status.Capacity[k]
		if !found && v1helper.IsOpaqueIntResourceName(k) {
			delete(node.Status.Allocatable, k)
		}
	}
	allocatableReservation := kl.containerManager.GetNodeAllocatableReservation()
	for k, v := range node.Status.Capacity {
		value := *(v.Copy())
		if res, exists := allocatableReservation[k]; exists {
			value.Sub(res)
		}
		node.Status.Allocatable[k] = value
	}
}

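// Worked example of the allocatable computation above (values hypothetical):
// with capacity cpu=4, memory=8Gi and a node-allocatable reservation of
// cpu=500m, memory=1Gi, the node advertises allocatable cpu=3500m and
// memory=7Gi; any resource without a reservation passes through unchanged.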
					
						
// Set versioninfo for the node.
func (kl *Kubelet) setNodeStatusVersionInfo(node *v1.Node) {
	verinfo, err := kl.cadvisor.VersionInfo()
	if err != nil {
		glog.Errorf("Error getting version info: %v", err)
	} else {
		node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
		node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion

		runtimeVersion := "Unknown"
		if runtimeVer, err := kl.containerRuntime.Version(); err == nil {
			runtimeVersion = runtimeVer.String()
		}
		node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", kl.containerRuntime.Type(), runtimeVersion)

		node.Status.NodeInfo.KubeletVersion = version.Get().String()
		// TODO: kube-proxy might be different version from kubelet in the future
		node.Status.NodeInfo.KubeProxyVersion = version.Get().String()
	}
}

					
						
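// Illustrative: with the Docker runtime at a hypothetical version 1.12.6, the
// resulting field would read ContainerRuntimeVersion: "docker://1.12.6"; if
// querying the runtime fails, it degrades to "docker://Unknown".
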
// Set daemonEndpoints for the node.
func (kl *Kubelet) setNodeStatusDaemonEndpoints(node *v1.Node) {
	node.Status.DaemonEndpoints = *kl.daemonEndpoints
}

// Set images list for the node
func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
	// Update image list of this node
	var imagesOnNode []v1.ContainerImage
	containerImages, err := kl.imageManager.GetImageList()
	if err != nil {
		glog.Errorf("Error getting image list: %v", err)
	} else {
		// sort the images from max to min, and only set top N images into the node status.
		sort.Sort(sliceutils.ByImageSize(containerImages))
		if maxImagesInNodeStatus < len(containerImages) {
			containerImages = containerImages[0:maxImagesInNodeStatus]
		}

		for _, image := range containerImages {
			names := append(image.RepoDigests, image.RepoTags...)
			// Report up to maxNamesPerImageInNodeStatus names per image.
			if len(names) > maxNamesPerImageInNodeStatus {
				names = names[0:maxNamesPerImageInNodeStatus]
			}
			imagesOnNode = append(imagesOnNode, v1.ContainerImage{
				Names:     names,
				SizeBytes: image.Size,
			})
		}
	}
	node.Status.Images = imagesOnNode
}

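// Illustrative: for an image tagged nginx:1.11, Names would list its repo
// digests first and its tags second, e.g. ["nginx@sha256:<digest>", "nginx:1.11"]
// (a hypothetical example), truncated to maxNamesPerImageInNodeStatus entries.
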
// Set the GOOS and GOARCH for this node
func (kl *Kubelet) setNodeStatusGoRuntime(node *v1.Node) {
	node.Status.NodeInfo.OperatingSystem = goruntime.GOOS
	node.Status.NodeInfo.Architecture = goruntime.GOARCH
}

// Set status for the node.
func (kl *Kubelet) setNodeStatusInfo(node *v1.Node) {
	kl.setNodeStatusMachineInfo(node)
	kl.setNodeStatusVersionInfo(node)
	kl.setNodeStatusDaemonEndpoints(node)
	kl.setNodeStatusImages(node)
	kl.setNodeStatusGoRuntime(node)
}

// Set Ready condition for the node.
func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) {
	// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
	// This is due to an issue with version skewed kubelet and master components.
	// ref: https://github.com/kubernetes/kubernetes/issues/16961
	currentTime := metav1.NewTime(kl.clock.Now())
	var newNodeReadyCondition v1.NodeCondition
	rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
	if len(rs) == 0 {
		newNodeReadyCondition = v1.NodeCondition{
			Type:              v1.NodeReady,
			Status:            v1.ConditionTrue,
			Reason:            "KubeletReady",
			Message:           "kubelet is posting ready status",
			LastHeartbeatTime: currentTime,
		}
	} else {
		newNodeReadyCondition = v1.NodeCondition{
			Type:              v1.NodeReady,
			Status:            v1.ConditionFalse,
			Reason:            "KubeletNotReady",
			Message:           strings.Join(rs, ","),
			LastHeartbeatTime: currentTime,
		}
	}

	// Append AppArmor status if it's enabled.
	// TODO(timstclair): This is a temporary message until node feature reporting is added.
	if newNodeReadyCondition.Status == v1.ConditionTrue &&
		kl.appArmorValidator != nil && kl.appArmorValidator.ValidateHost() == nil {
		newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message)
	}

	// Record any soft requirements that were not met in the container manager.
	status := kl.containerManager.Status()
	if status.SoftRequirements != nil {
		newNodeReadyCondition.Message = fmt.Sprintf("%s. WARNING: %s", newNodeReadyCondition.Message, status.SoftRequirements.Error())
	}

	readyConditionUpdated := false
	needToRecordEvent := false
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == v1.NodeReady {
			if node.Status.Conditions[i].Status == newNodeReadyCondition.Status {
				newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
			} else {
				newNodeReadyCondition.LastTransitionTime = currentTime
				needToRecordEvent = true
			}
			node.Status.Conditions[i] = newNodeReadyCondition
			readyConditionUpdated = true
			break
		}
	}
	if !readyConditionUpdated {
		newNodeReadyCondition.LastTransitionTime = currentTime
		node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
	}
	if needToRecordEvent {
		if newNodeReadyCondition.Status == v1.ConditionTrue {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeReady)
		} else {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotReady)
			glog.Infof("Node became not ready: %+v", newNodeReadyCondition)
		}
	}
}

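// Note on the heartbeat/transition split above: LastHeartbeatTime advances on
// every sync, while LastTransitionTime is carried over unchanged unless the
// Ready status actually flips, which is also the only case that records a
// NodeReady/NodeNotReady event.
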
// setNodeMemoryPressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeMemoryPressureCondition(node *v1.Node) {
	currentTime := metav1.NewTime(kl.clock.Now())
	var condition *v1.NodeCondition

	// Check if NodeMemoryPressure condition already exists and if it does, just pick it up for update.
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == v1.NodeMemoryPressure {
			condition = &node.Status.Conditions[i]
		}
	}

	newCondition := false
	// If the NodeMemoryPressure condition doesn't exist, create one
	if condition == nil {
		condition = &v1.NodeCondition{
			Type:   v1.NodeMemoryPressure,
			Status: v1.ConditionUnknown,
		}
		// cannot be appended to node.Status.Conditions here because it gets
		// copied to the slice. So if we append to the slice here none of the
		// updates we make below are reflected in the slice.
		newCondition = true
	}

	// Update the heartbeat time
	condition.LastHeartbeatTime = currentTime

	// Note: The conditions below handle both the case when a new NodeMemoryPressure
	// condition is created and the case when the condition already exists. When a new
	// condition is created its status is set to v1.ConditionUnknown, which matches either
	// condition.Status != v1.ConditionTrue or
	// condition.Status != v1.ConditionFalse in the conditions below, depending on whether
	// the kubelet is under memory pressure or not.
	if kl.evictionManager.IsUnderMemoryPressure() {
		if condition.Status != v1.ConditionTrue {
			condition.Status = v1.ConditionTrue
			condition.Reason = "KubeletHasInsufficientMemory"
			condition.Message = "kubelet has insufficient memory available"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasInsufficientMemory")
		}
	} else {
		if condition.Status != v1.ConditionFalse {
			condition.Status = v1.ConditionFalse
			condition.Reason = "KubeletHasSufficientMemory"
			condition.Message = "kubelet has sufficient memory available"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientMemory")
		}
	}

	if newCondition {
		node.Status.Conditions = append(node.Status.Conditions, *condition)
	}
}

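// Both pressure conditions are driven by the eviction manager: for example,
// IsUnderMemoryPressure reflects whether a configured memory eviction signal
// (e.g. a memory.available threshold; the specific signal is illustrative)
// has been observed, so the functions here only translate that state into a
// node condition and an event.
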
// setNodeDiskPressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) {
	currentTime := metav1.NewTime(kl.clock.Now())
	var condition *v1.NodeCondition

	// Check if NodeDiskPressure condition already exists and if it does, just pick it up for update.
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == v1.NodeDiskPressure {
			condition = &node.Status.Conditions[i]
		}
	}

	newCondition := false
	// If the NodeDiskPressure condition doesn't exist, create one
	if condition == nil {
		condition = &v1.NodeCondition{
			Type:   v1.NodeDiskPressure,
			Status: v1.ConditionUnknown,
		}
		// cannot be appended to node.Status.Conditions here because it gets
		// copied to the slice. So if we append to the slice here none of the
		// updates we make below are reflected in the slice.
		newCondition = true
	}

	// Update the heartbeat time
	condition.LastHeartbeatTime = currentTime

	// Note: The conditions below handle both the case when a new NodeDiskPressure
	// condition is created and the case when the condition already exists. When a new
	// condition is created its status is set to v1.ConditionUnknown, which matches either
	// condition.Status != v1.ConditionTrue or
	// condition.Status != v1.ConditionFalse in the conditions below, depending on whether
	// the kubelet is under disk pressure or not.
	if kl.evictionManager.IsUnderDiskPressure() {
		if condition.Status != v1.ConditionTrue {
			condition.Status = v1.ConditionTrue
			condition.Reason = "KubeletHasDiskPressure"
			condition.Message = "kubelet has disk pressure"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasDiskPressure")
		}
	} else {
		if condition.Status != v1.ConditionFalse {
			condition.Status = v1.ConditionFalse
			condition.Reason = "KubeletHasNoDiskPressure"
			condition.Message = "kubelet has no disk pressure"
			condition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasNoDiskPressure")
		}
	}

	if newCondition {
		node.Status.Conditions = append(node.Status.Conditions, *condition)
	}
}

// Set OODCondition for the node.
func (kl *Kubelet) setNodeOODCondition(node *v1.Node) {
	currentTime := metav1.NewTime(kl.clock.Now())
	var nodeOODCondition *v1.NodeCondition

	// Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == v1.NodeOutOfDisk {
			nodeOODCondition = &node.Status.Conditions[i]
		}
	}

	newOODCondition := false
	// If the NodeOutOfDisk condition doesn't exist, create one.
	if nodeOODCondition == nil {
		nodeOODCondition = &v1.NodeCondition{
			Type:   v1.NodeOutOfDisk,
			Status: v1.ConditionUnknown,
		}
		// nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
		// copied to the slice. So if we append nodeOODCondition to the slice here none of the
		// updates we make to nodeOODCondition below are reflected in the slice.
		newOODCondition = true
	}

	// Update the heartbeat time irrespective of all the conditions.
	nodeOODCondition.LastHeartbeatTime = currentTime

	// Note: The conditions below handle both the case when a new NodeOutOfDisk condition
	// is created and the case when the condition already exists. When a new condition
	// is created its status is set to v1.ConditionUnknown, which matches either
	// nodeOODCondition.Status != v1.ConditionTrue or
	// nodeOODCondition.Status != v1.ConditionFalse in the conditions below, depending on
	// whether the kubelet is out of disk or not.
	if kl.isOutOfDisk() {
		if nodeOODCondition.Status != v1.ConditionTrue {
			nodeOODCondition.Status = v1.ConditionTrue
			nodeOODCondition.Reason = "KubeletOutOfDisk"
			nodeOODCondition.Message = "out of disk space"
			nodeOODCondition.LastTransitionTime = currentTime
			kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeOutOfDisk")
		}
	} else {
		if nodeOODCondition.Status != v1.ConditionFalse {
			// Update the out of disk condition when the condition status is unknown even if we
			// are within the outOfDiskTransitionFrequency duration. We do this to set the
			// condition status correctly at kubelet startup.
			if nodeOODCondition.Status == v1.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency {
				nodeOODCondition.Status = v1.ConditionFalse
				nodeOODCondition.Reason = "KubeletHasSufficientDisk"
				nodeOODCondition.Message = "kubelet has sufficient disk space available"
				nodeOODCondition.LastTransitionTime = currentTime
				kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientDisk")
			} else {
				glog.Infof("Node is no longer out of disk, but the OutOfDisk condition last transitioned less than %s ago; leaving it unchanged", kl.outOfDiskTransitionFrequency)
			}
		}
	}

	if newOODCondition {
		node.Status.Conditions = append(node.Status.Conditions, *nodeOODCondition)
	}
}

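// Note on the hysteresis above: once the node stops being out of disk, the
// condition is only flipped back to False after outOfDiskTransitionFrequency
// has elapsed since the last transition. With a (purely illustrative) 5m
// frequency, a node can keep reporting OutOfDisk=True for up to 5 minutes
// after recovering.
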
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
// TODO: why is this a package var?
var oldNodeUnschedulable bool

// Record an event if the node's schedulability has changed.
func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) {
	if oldNodeUnschedulable != node.Spec.Unschedulable {
		if node.Spec.Unschedulable {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
		} else {
			kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeSchedulable)
		}
		oldNodeUnschedulable = node.Spec.Unschedulable
	}
}

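// Illustrative: cordoning a node (which sets Spec.Unschedulable) emits a single
// NodeNotSchedulable event on the next status sync, and uncordoning it a single
// NodeSchedulable event; repeated syncs with an unchanged flag emit nothing,
// since the package-level oldNodeUnschedulable tracks the last observed value.
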
// Update VolumesInUse field in Node Status only after states are synced up at least once
// in volume reconciler.
func (kl *Kubelet) setNodeVolumesInUseStatus(node *v1.Node) {
	// Make sure to only update node status after reconciler starts syncing up states
	if kl.volumeManager.ReconcilerStatesHasBeenSynced() {
		node.Status.VolumesInUse = kl.volumeManager.GetVolumesInUse()
	}
}

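// Reporting VolumesInUse only after the reconciler has synced avoids publishing
// an empty list at startup, which could otherwise suggest that attached volumes
// are no longer in use by this node (VolumesInUse is what the attach/detach
// controller consults before detaching a volume).
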
// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
// TODO(madhusudancs): Simplify the logic for setting node conditions and
// refactor the node status condition code out to a different file.
func (kl *Kubelet) setNodeStatus(node *v1.Node) {
	for _, f := range kl.setNodeStatusFuncs {
		if err := f(node); err != nil {
			glog.Warningf("Failed to set some node status fields: %s", err)
		}
	}
}

// defaultNodeStatusFuncs is a factory that generates the default set of
// setNodeStatus funcs
func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
	// initial set of node status update handlers, can be modified by Options
	withoutError := func(f func(*v1.Node)) func(*v1.Node) error {
		return func(n *v1.Node) error {
			f(n)
			return nil
		}
	}
	return []func(*v1.Node) error{
		kl.setNodeAddress,
		withoutError(kl.setNodeStatusInfo),
		withoutError(kl.setNodeOODCondition),
		withoutError(kl.setNodeMemoryPressureCondition),
		withoutError(kl.setNodeDiskPressureCondition),
		withoutError(kl.setNodeReadyCondition),
		withoutError(kl.setNodeVolumesInUseStatus),
		withoutError(kl.recordNodeSchedulableEvent),
	}
}

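// withoutError exists so that the simple setters above, which cannot fail,
// satisfy the same func(*v1.Node) error signature as kl.setNodeAddress.
// Sketch of how a caller might extend the pipeline (the Option helper below
// is hypothetical, not part of this file):
//
//	func WithExtraNodeStatus(f func(*v1.Node) error) Option {
//		return func(kl *Kubelet) {
//			kl.setNodeStatusFuncs = append(kl.setNodeStatusFuncs, f)
//		}
//	}
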
// Validate given node IP belongs to the current host
func (kl *Kubelet) validateNodeIP() error {
	if kl.nodeIP == nil {
		return nil
	}

	// Honor IP limitations set in setNodeStatus()
	if kl.nodeIP.IsLoopback() {
		return fmt.Errorf("nodeIP can't be loopback address")
	}
	if kl.nodeIP.To4() == nil {
		return fmt.Errorf("nodeIP must be IPv4 address")
	}

	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return err
	}
	for _, addr := range addrs {
		var ip net.IP
		switch v := addr.(type) {
		case *net.IPNet:
			ip = v.IP
		case *net.IPAddr:
			ip = v.IP
		}
		if ip != nil && ip.Equal(kl.nodeIP) {
			return nil
		}
	}
	return fmt.Errorf("Node IP: %q not found in the host's network interfaces", kl.nodeIP.String())
}