Corrected some spelling and grammatical errors

Signed-off-by: Daniel Hu <farmer.hutao@outlook.com>
@@ -63,7 +63,7 @@ type DataV1 struct {
 }
 
 // NewV1 returns an instance of Checkpoint, in V1 (k8s <= 1.19) format.
-// Users should avoid creating checkpoints in formats different than the most recent one,
+// Users should avoid creating checkpoints in formats different from the most recent one,
 // use the old formats only to validate existing checkpoint and convert them to most recent
 // format. The only exception should be test code.
 func NewV1(devEntries []PodDevicesEntryV1,
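As context for the policy in this comment: outside of tests, an old-format checkpoint should only be read to validate it and convert it to the newest format. A minimal sketch of that flow, using invented stand-in types (checkpointV1, checkpointV2) rather than the real devicemanager checkpoint structures:

package main

import "fmt"

// Invented stand-ins for versioned checkpoint formats; the real
// DataV1/PodDevicesEntryV1 types carry more fields.
type checkpointV1 struct{ Entries []string }
type checkpointV2 struct {
	Entries []string
	Version string
}

// convertV1ToV2 re-encodes legacy data in the most recent format,
// the only sanctioned use of an old format outside test code.
func convertV1ToV2(old checkpointV1) checkpointV2 {
	return checkpointV2{Entries: old.Entries, Version: "v2"}
}

func main() {
	legacy := checkpointV1{Entries: []string{"vendor.com/gpu/dev0"}}
	fmt.Printf("%+v\n", convertV1ToV2(legacy))
}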
@@ -74,10 +74,10 @@ type ManagerImpl struct {
 	// allDevices holds all the devices currently registered to the device manager
 	allDevices ResourceDeviceInstances
 
-	// healthyDevices contains all of the registered healthy resourceNames and their exported device IDs.
+	// healthyDevices contains all the registered healthy resourceNames and their exported device IDs.
 	healthyDevices map[string]sets.Set[string]
 
-	// unhealthyDevices contains all of the unhealthy devices and their exported device IDs.
+	// unhealthyDevices contains all the unhealthy devices and their exported device IDs.
 	unhealthyDevices map[string]sets.Set[string]
 
 	// allocatedDevices contains allocated deviceIds, keyed by resourceName.
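The two maps above imply a bookkeeping pattern: device IDs are grouped per resourceName, and a health change moves an ID between the healthy and unhealthy sets. A hedged, self-contained sketch of that pattern (markUnhealthy is an invented helper, not a real ManagerImpl method; it assumes the k8s.io/apimachinery dependency):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// markUnhealthy moves one device ID from the healthy set to the
// unhealthy set for the given resource.
func markUnhealthy(healthy, unhealthy map[string]sets.Set[string], resource, devID string) {
	if _, ok := unhealthy[resource]; !ok {
		unhealthy[resource] = sets.New[string]()
	}
	if devs, ok := healthy[resource]; ok {
		devs.Delete(devID)
	}
	unhealthy[resource].Insert(devID)
}

func main() {
	healthy := map[string]sets.Set[string]{"vendor.com/gpu": sets.New("gpu-0", "gpu-1")}
	unhealthy := map[string]sets.Set[string]{}
	markUnhealthy(healthy, unhealthy, "vendor.com/gpu", "gpu-1")
	fmt.Println(healthy["vendor.com/gpu"].UnsortedList(), unhealthy["vendor.com/gpu"].UnsortedList())
}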
@@ -90,7 +90,7 @@ type ManagerImpl struct {
 	// List of NUMA Nodes available on the underlying machine
 	numaNodes []int
 
-	// Store of Topology Affinties that the Device Manager can query.
+	// Store of Topology Affinities that the Device Manager can query.
 	topologyAffinityStore topologymanager.Store
 
 	// devicesToReuse contains devices that can be reused as they have been allocated to
@@ -576,7 +576,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	//    note: if we get this far the runtime is surely running. This is usually enforced at OS level by startup system services dependencies.
 
 	// First we take care of the exceptional flow (scenarios 2 and 3). In both flows, kubelet is reinitializing, and while kubelet is initializing, sources are NOT all ready.
-	// Is this a simple kubelet restart (scenario 2)? To distinguish, we use the informations we got for runtime. If we are asked to allocate devices for containers reported
+	// Is this a simple kubelet restart (scenario 2)? To distinguish, we use the information we got for runtime. If we are asked to allocate devices for containers reported
 	// running, then it can only be a kubelet restart. On node reboot the runtime and the containers were also shut down. Then, if the container was running, it can only be
 	// because it already has access to all the required devices, so we got nothing to do and we can bail out.
 	if !m.sourcesReady.AllReady() && m.isContainerAlreadyRunning(podUID, contName) {
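The reasoning in this comment reduces to a small decision: while kubelet is still initializing (sources not yet ready), a container the runtime reports as running must already own its devices, so allocation can bail out. A hedged sketch of just that decision, where allReady and running stand in for m.sourcesReady.AllReady() and m.isContainerAlreadyRunning():

package main

import "fmt"

// needsAllocation mirrors the early-exit logic described above.
func needsAllocation(allReady, running bool) bool {
	if !allReady && running {
		// Scenario 2: plain kubelet restart; the running container
		// already holds its devices, so keep the existing allocation.
		return false
	}
	// Fresh pod, or node reboot (container not running): allocate.
	return true
}

func main() {
	fmt.Println(needsAllocation(false, true)) // kubelet restart -> false
	fmt.Println(needsAllocation(true, false)) // steady state, new container -> true
}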
@@ -676,7 +676,7 @@ func (m *ManagerImpl) devicesToAllocate(podUID, contName, resource string, requi
 	}
 
 	// If we can't allocate all remaining devices from the set of aligned ones,
-	// then start by first allocating all of the  aligned devices (to ensure
+	// then start by first allocating all the aligned devices (to ensure
 	// that the alignment guaranteed by the TopologyManager is honored).
 	if allocateRemainingFrom(aligned) {
 		return allocated, nil
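The allocation order described here (exhaust the topology-aligned set before falling back to anything else) can be illustrated with a hedged, self-contained sketch; the names below are invented for illustration and do not match the real allocateRemainingFrom closure:

package main

import "fmt"

// allocate takes devices from the aligned pool first, then from the
// unaligned pool, so TopologyManager alignment is honored whenever
// possible.
func allocate(needed int, aligned, unaligned []string) []string {
	allocated := make([]string, 0, needed)
	for _, pool := range [][]string{aligned, unaligned} {
		for _, dev := range pool {
			if len(allocated) == needed {
				return allocated
			}
			allocated = append(allocated, dev)
		}
	}
	return allocated
}

func main() {
	fmt.Println(allocate(3, []string{"dev0", "dev1"}, []string{"dev2", "dev3"}))
	// -> [dev0 dev1 dev2]: aligned devices are exhausted before unaligned ones.
}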
@@ -734,7 +734,7 @@ func (m *ManagerImpl) filterByAffinity(podUID, contName, resource string, availa
 		}
 	}
 
-	// Get a flat list of all of the nodes associated with available devices.
+	// Get a flat list of all the nodes associated with available devices.
 	var nodes []int
 	for node := range perNodeDevices {
 		nodes = append(nodes, node)
@@ -89,7 +89,7 @@ func (s *server) Start() error {
 		}
 	}
 
-	// For now we leave cleanup of the *entire* directory up to the Handler
+	// For now, we leave cleanup of the *entire* directory up to the Handler
 	// (even though we should in theory be able to just wipe the whole directory)
 	// because the Handler stores its checkpoint file (amongst others) in here.
 	if err := s.rhandler.CleanupPluginDirectory(s.socketDir); err != nil {
@@ -163,7 +163,7 @@ func (pdev *podDevices) removeContainerAllocatedResources(podUID, contName strin
 	}
 }
 
-// Returns all of devices allocated to the pods being tracked, keyed by resourceName.
+// Returns all devices allocated to the pods being tracked, keyed by resourceName.
 func (pdev *podDevices) devices() map[string]sets.Set[string] {
 	ret := make(map[string]sets.Set[string])
 	pdev.RLock()
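The devices() accessor shown here follows a common snapshot pattern: take a read lock, copy per-resource device IDs into a fresh map of sets, and hand callers a result they can use without holding the lock. A hedged sketch of that pattern with an invented tracker type (assuming the k8s.io/apimachinery dependency):

package main

import (
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/util/sets"
)

// tracker is a stand-in for podDevices: per-resource device IDs
// guarded by a read/write mutex.
type tracker struct {
	sync.RWMutex
	byResource map[string]sets.Set[string]
}

// snapshot copies the sets under a read lock, so the returned map is
// independent of later mutations.
func (t *tracker) snapshot() map[string]sets.Set[string] {
	ret := make(map[string]sets.Set[string])
	t.RLock()
	defer t.RUnlock()
	for resource, devs := range t.byResource {
		ret[resource] = devs.Clone()
	}
	return ret
}

func main() {
	t := &tracker{byResource: map[string]sets.Set[string]{"vendor.com/gpu": sets.New("gpu-0")}}
	fmt.Println(t.snapshot()["vendor.com/gpu"].UnsortedList())
}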
@@ -38,7 +38,7 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
 	// Garbage collect any stranded device resources before providing TopologyHints
 	m.UpdateAllocatedDevices()
 
-	// Loop through all device resources and generate TopologyHints for them..
+	// Loop through all device resources and generate TopologyHints for them.
 	deviceHints := make(map[string][]topologymanager.TopologyHint)
 	accumulatedResourceRequests := m.getContainerDeviceRequest(container)
 
@@ -171,7 +171,7 @@ func (m *ManagerImpl) generateDeviceTopologyHints(resource string, available set
 			minAffinitySize = mask.Count()
 		}
 
-		// Then check to see if all of the reusable devices are part of the bitmask.
+		// Then check to see if all the reusable devices are part of the bitmask.
 		numMatching := 0
 		for d := range reusable {
 			// Skip the device if it doesn't specify any topology info.
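The matching step in this hunk counts how many reusable devices sit on NUMA nodes covered by the candidate bitmask. A hedged sketch of that counting logic, using a plain uint64 in place of the topologymanager bitmask type and an invented device-to-node map:

package main

import "fmt"

// countMatching returns how many devices live on a NUMA node whose
// bit is set in mask.
func countMatching(reusable map[string]int, mask uint64) int {
	numMatching := 0
	for _, numaNode := range reusable {
		if mask&(1<<uint(numaNode)) != 0 {
			numMatching++
		}
	}
	return numMatching
}

func main() {
	reusable := map[string]int{"dev0": 0, "dev1": 1} // device -> NUMA node
	mask := uint64(1 << 0)                           // candidate mask covering node 0 only
	fmt.Println(countMatching(reusable, mask))       // -> 1
}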
							
								
								
									
pkg/kubelet/pluginmanager/cache/types.go (4 changes, vendored)
@@ -48,11 +48,11 @@ type PluginHandler interface {
 	// Validate returns an error if the information provided by
 	// the potential plugin is erroneous (unsupported version, ...)
 	ValidatePlugin(pluginName string, endpoint string, versions []string) error
-	// RegisterPlugin is called so that the plugin can be register by any
+	// RegisterPlugin is called so that the plugin can be registered by any
 	// plugin consumer
 	// Error encountered here can still be Notified to the plugin.
 	RegisterPlugin(pluginName, endpoint string, versions []string) error
-	// DeRegister is called once the pluginwatcher observes that the socket has
+	// DeRegisterPlugin is called once the pluginwatcher observes that the socket has
 	// been deleted.
 	DeRegisterPlugin(pluginName string)
 }
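A minimal implementation of the PluginHandler interface shown in this hunk makes the lifecycle concrete: validate first, then register, and deregister when the socket disappears. This hedged stub only logs the callbacks; a real handler would check versions in ValidatePlugin and wire the endpoint up in RegisterPlugin. The socket path and plugin name in main are invented examples:

package main

import "fmt"

type loggingHandler struct{}

// ValidatePlugin would normally reject unsupported versions.
func (h loggingHandler) ValidatePlugin(pluginName string, endpoint string, versions []string) error {
	fmt.Printf("validate %s at %s (versions %v)\n", pluginName, endpoint, versions)
	return nil
}

// RegisterPlugin would normally connect to the endpoint and register
// the plugin with its consumer.
func (h loggingHandler) RegisterPlugin(pluginName, endpoint string, versions []string) error {
	fmt.Printf("register %s at %s\n", pluginName, endpoint)
	return nil
}

// DeRegisterPlugin is invoked once the pluginwatcher sees the socket
// has been deleted.
func (h loggingHandler) DeRegisterPlugin(pluginName string) {
	fmt.Printf("deregister %s\n", pluginName)
}

func main() {
	var h loggingHandler
	_ = h.ValidatePlugin("vendor.com/gpu", "/var/lib/kubelet/plugins/gpu.sock", []string{"v1beta1"})
	_ = h.RegisterPlugin("vendor.com/gpu", "/var/lib/kubelet/plugins/gpu.sock", []string{"v1beta1"})
	h.DeRegisterPlugin("vendor.com/gpu")
}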