Mirror of https://github.com/optim-enterprises-bv/kubernetes.git, synced 2025-11-03 19:58:17 +00:00
feat: revert #103979 because it is a duplicate

Signed-off-by: likakuli <1154584512@qq.com>
Committed by: Francesco Romani
Parent: c6669ea7d6
Commit: 212c4c4851
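The diff below removes the pendingAdmissionPod bookkeeping that #103979 had added to the kubelet's CPU, device, and memory managers. Each manager stored the pod currently going through admission in a dedicated field so that its garbage collection (removeStaleState / UpdateAllocatedDevices) would not reclaim that pod's allocations before admission finished; the revert drops the field, treats m.activePods() alone as authoritative, and updates the tests to place the admitted pod directly into activePods. The sketch below is a minimal, self-contained model of the two variants; the pod and manager types and the state map are illustrative stand-ins, not the kubelet's real types.

// A minimal, self-contained model of the bookkeeping this commit removes.
// pod, manager, and the state map are illustrative stand-ins, not the
// kubelet's real types.
package main

import "fmt"

type pod struct{ uid string }

type manager struct {
	activePods          func() []*pod     // supplied by the kubelet
	pendingAdmissionPod *pod              // the field added by #103979 and deleted here
	state               map[string]string // podUID -> allocated resources (stand-in)
}

// removeStaleState drops allocations belonging to pods that are no longer
// active. Before the revert, the managers unioned activePods() with the pod
// saved during admission; after it, activePods() alone is authoritative, on
// the premise that it already covers the pod being admitted.
func (m *manager) removeStaleState() {
	live := make(map[string]bool)
	for _, p := range m.activePods() {
		live[p.uid] = true
	}
	// The duplicated bookkeeping removed by this revert looked like:
	//
	//	if m.pendingAdmissionPod != nil {
	//		live[m.pendingAdmissionPod.uid] = true
	//	}
	for uid := range m.state {
		if !live[uid] {
			delete(m.state, uid)
		}
	}
}

func main() {
	admitted := &pod{uid: "fakePod"}
	m := &manager{
		// Mirroring the updated tests: the pod under admission is already
		// present in activePods, so no separate pending-admission slot is kept.
		activePods: func() []*pod { return []*pod{admitted} },
		state: map[string]string{
			"fakePod":  "2 CPUs",
			"stalePod": "4 CPUs",
		},
	}
	m.removeStaleState()
	fmt.Println(m.state) // map[fakePod:2 CPUs]: stale entry gone, admitted pod kept
}

Running the sketch shows the stale entry being reclaimed while the admitted pod's allocation survives without any pending-admission slot, which is the behavior the reverted tests now assert.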
				
@@ -146,9 +146,6 @@ type manager struct {
 	// allocatableCPUs is the set of online CPUs as reported by the system,
 	// and available for allocation, minus the reserved set
 	allocatableCPUs cpuset.CPUSet
-
-	// pendingAdmissionPod contain the pod during the admission phase
-	pendingAdmissionPod *v1.Pod
 }
 
 var _ Manager = &manager{}
@@ -254,10 +251,6 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
 }
 
 func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(p)
-
 	// Garbage collect any stranded resources before allocating CPUs.
 	m.removeStaleState()
 
@@ -326,9 +319,6 @@ func (m *manager) State() state.Reader {
 }
 
 func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -336,9 +326,6 @@ func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[str
 }
 
 func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -375,14 +362,11 @@ func (m *manager) removeStaleState() {
 	defer m.Unlock()
 
 	// Get the list of active pods.
-	activeAndAdmittedPods := m.activePods()
-	if m.pendingAdmissionPod != nil {
-		activeAndAdmittedPods = append(activeAndAdmittedPods, m.pendingAdmissionPod)
-	}
+	activePods := m.activePods()
 
 	// Build a list of (podUID, containerName) pairs for all containers in all active Pods.
 	activeContainers := make(map[string]map[string]struct{})
-	for _, pod := range activeAndAdmittedPods {
+	for _, pod := range activePods {
 		activeContainers[string(pod.UID)] = make(map[string]struct{})
 		for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 			activeContainers[string(pod.UID)][container.Name] = struct{}{}
@@ -554,10 +538,3 @@ func (m *manager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
 func (m *manager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
 	return m.state.GetCPUSetOrDefault(podUID, containerName)
 }
-
-func (m *manager) setPodPendingAdmission(pod *v1.Pod) {
-	m.Lock()
-	defer m.Unlock()
-
-	m.pendingAdmissionPod = pod
-}

@@ -325,7 +325,7 @@ func TestCPUManagerAdd(t *testing.T) {
 
 		pod := makePod("fakePod", "fakeContainer", "2", "2")
 		container := &pod.Spec.Containers[0]
-		mgr.activePods = func() []*v1.Pod { return nil }
+		mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} }
 
 		err := mgr.Allocate(pod, container)
 		if !reflect.DeepEqual(err, testCase.expAllocateErr) {
@@ -1321,7 +1321,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
 
 		pod := makePod("fakePod", "fakeContainer", "2", "2")
 		container := &pod.Spec.Containers[0]
-		mgr.activePods = func() []*v1.Pod { return nil }
+		mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} }
 
 		err := mgr.Allocate(pod, container)
 		if !reflect.DeepEqual(err, testCase.expAllocateErr) {

@@ -245,11 +245,6 @@ func TestGetTopologyHints(t *testing.T) {
 		if len(tc.expectedHints) == 0 && len(hints) == 0 {
 			continue
 		}
-
-		if m.pendingAdmissionPod == nil {
-			t.Errorf("The pendingAdmissionPod should point to the current pod after the call to GetTopologyHints()")
-		}
-
 		sort.SliceStable(hints, func(i, j int) bool {
 			return hints[i].LessThan(hints[j])
 		})
@@ -298,7 +293,6 @@ func TestGetPodTopologyHints(t *testing.T) {
 		if len(tc.expectedHints) == 0 && len(podHints) == 0 {
 			continue
 		}
-
 		sort.SliceStable(podHints, func(i, j int) bool {
 			return podHints[i].LessThan(podHints[j])
 		})

@@ -101,9 +101,6 @@ type ManagerImpl struct {
 	// init containers.
 	devicesToReuse PodReusableDevices
 
-	// pendingAdmissionPod contain the pod during the admission phase
-	pendingAdmissionPod *v1.Pod
-
 	// containerMap provides a mapping from (pod, container) -> containerID
 	// for all containers in a pod. Used to detect pods running across a restart
 	containerMap containermap.ContainerMap
@@ -364,10 +361,6 @@ func (m *ManagerImpl) Stop() error {
 // Allocate is the call that you can use to allocate a set of devices
 // from the registered device plugins.
 func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	if _, ok := m.devicesToReuse[string(pod.UID)]; !ok {
 		m.devicesToReuse[string(pod.UID)] = make(map[string]sets.Set[string])
 	}
@@ -548,20 +541,14 @@ func (m *ManagerImpl) getCheckpoint() (checkpoint.DeviceManagerCheckpoint, error
 
 // UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
 func (m *ManagerImpl) UpdateAllocatedDevices() {
+	activePods := m.activePods()
 	if !m.sourcesReady.AllReady() {
 		return
 	}
-
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
-
-	activeAndAdmittedPods := m.activePods()
-	if m.pendingAdmissionPod != nil {
-		activeAndAdmittedPods = append(activeAndAdmittedPods, m.pendingAdmissionPod)
-	}
-
 	podsToBeRemoved := m.podDevices.pods()
-	for _, pod := range activeAndAdmittedPods {
+	for _, pod := range activePods {
 		podsToBeRemoved.Delete(string(pod.UID))
 	}
 	if len(podsToBeRemoved) <= 0 {
@@ -1171,13 +1158,6 @@ func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
 	return len(checkpoints) == 0
 }
 
-func (m *ManagerImpl) setPodPendingAdmission(pod *v1.Pod) {
-	m.mutex.Lock()
-	defer m.mutex.Unlock()
-
-	m.pendingAdmissionPod = pod
-}
-
 func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool {
 	cntID, err := m.containerMap.GetContainerID(podUID, cntName)
 	if err != nil {

@@ -31,10 +31,6 @@ import (
 // ensures the Device Manager is consulted when Topology Aware Hints for each
 // container are created.
 func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded device resources before providing TopologyHints
 	m.UpdateAllocatedDevices()
 
@@ -87,10 +83,6 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
 // GetPodTopologyHints implements the topologymanager.HintProvider Interface which
 // ensures the Device Manager is consulted when Topology Aware Hints for Pod are created.
 func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded device resources before providing TopologyHints
 	m.UpdateAllocatedDevices()
 

@@ -126,9 +126,6 @@ type manager struct {
 
 	// allocatableMemory holds the allocatable memory for each NUMA node
 	allocatableMemory []state.Block
-
-	// pendingAdmissionPod contain the pod during the admission phase
-	pendingAdmissionPod *v1.Pod
 }
 
 var _ Manager = &manager{}
@@ -242,10 +239,6 @@ func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.
 
 // Allocate is called to pre-allocate memory resources during Pod admission.
 func (m *manager) Allocate(pod *v1.Pod, container *v1.Container) error {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded resources before allocation
 	m.removeStaleState()
 
@@ -284,10 +277,6 @@ func (m *manager) State() state.Reader {
 
 // GetPodTopologyHints returns the topology hints for the topology manager
 func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -296,10 +285,6 @@ func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.
 
 // GetTopologyHints returns the topology hints for the topology manager
 func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -322,15 +307,12 @@ func (m *manager) removeStaleState() {
 	m.Lock()
 	defer m.Unlock()
 
-	// Get the list of admitted and active pods.
-	activeAndAdmittedPods := m.activePods()
-	if m.pendingAdmissionPod != nil {
-		activeAndAdmittedPods = append(activeAndAdmittedPods, m.pendingAdmissionPod)
-	}
+	// Get the list of active pods.
+	activePods := m.activePods()
 
 	// Build a list of (podUID, containerName) pairs for all containers in all active Pods.
 	activeContainers := make(map[string]map[string]struct{})
-	for _, pod := range activeAndAdmittedPods {
+	for _, pod := range activePods {
 		activeContainers[string(pod.UID)] = make(map[string]struct{})
 		for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 			activeContainers[string(pod.UID)][container.Name] = struct{}{}
@@ -464,10 +446,3 @@ func (m *manager) GetAllocatableMemory() []state.Block {
 func (m *manager) GetMemory(podUID, containerName string) []state.Block {
 	return m.state.GetMemoryBlocks(podUID, containerName)
 }
-
-func (m *manager) setPodPendingAdmission(pod *v1.Pod) {
-	m.Lock()
-	defer m.Unlock()
-
-	m.pendingAdmissionPod = pod
-}

@@ -2019,129 +2019,6 @@ func TestNewManager(t *testing.T) {
 
 func TestGetTopologyHints(t *testing.T) {
 	testCases := []testMemoryManager{
-		{
-			description: "Successful hint generation",
-			policyName:  policyTypeStatic,
-			machineInfo: returnMachineInfo(),
-			reserved: systemReservedMemory{
-				0: map[v1.ResourceName]uint64{
-					v1.ResourceMemory: 1 * gb,
-				},
-				1: map[v1.ResourceName]uint64{
-					v1.ResourceMemory: 1 * gb,
-				},
-			},
-			assignments: state.ContainerMemoryAssignments{
-				"fakePod1": map[string][]state.Block{
-					"fakeContainer1": {
-						{
-							NUMAAffinity: []int{0},
-							Type:         v1.ResourceMemory,
-							Size:         1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type:         hugepages1Gi,
-							Size:         1 * gb,
-						},
-					},
-					"fakeContainer2": {
-						{
-							NUMAAffinity: []int{0},
-							Type:         v1.ResourceMemory,
-							Size:         1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type:         hugepages1Gi,
-							Size:         1 * gb,
-						},
-					},
-				},
-			},
-			machineState: state.NUMANodeMap{
-				0: &state.NUMANodeState{
-					Cells:               []int{0},
-					NumberOfAssignments: 4,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable:    9 * gb,
-							Free:           7 * gb,
-							Reserved:       2 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize:   10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable:    5 * gb,
-							Free:           3 * gb,
-							Reserved:       2 * gb,
-							SystemReserved: 0 * gb,
-							TotalMemSize:   5 * gb,
-						},
-					},
-				},
-				1: &state.NUMANodeState{
-					Cells:               []int{1},
-					NumberOfAssignments: 0,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable:    9 * gb,
-							Free:           9 * gb,
-							Reserved:       0 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize:   10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable:    5 * gb,
-							Free:           5 * gb,
-							Reserved:       0,
-							SystemReserved: 0,
-							TotalMemSize:   5 * gb,
-						},
-					},
-				},
-			},
-			expectedError: nil,
-			expectedHints: map[string][]topologymanager.TopologyHint{
-				string(v1.ResourceMemory): {
-					{
-						NUMANodeAffinity: newNUMAAffinity(0),
-						Preferred:        true,
-					},
-					{
-						NUMANodeAffinity: newNUMAAffinity(1),
-						Preferred:        true,
-					},
-				},
-				string(hugepages1Gi): {
-					{
-						NUMANodeAffinity: newNUMAAffinity(0),
-						Preferred:        true,
-					},
-					{
-						NUMANodeAffinity: newNUMAAffinity(1),
-						Preferred:        true,
-					},
-				},
-			},
-			activePods: []*v1.Pod{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						UID: "fakePod1",
-					},
-					Spec: v1.PodSpec{
-						Containers: []v1.Container{
-							{
-								Name: "fakeContainer1",
-							},
-							{
-								Name: "fakeContainer2",
-							},
-						},
-					},
-				},
-			},
-		},
 		{
 			description: "Successful hint generation",
 			policyName:  policyTypeStatic,
@@ -2255,7 +2132,6 @@ func TestGetTopologyHints(t *testing.T) {
 					},
 				},
 			},
-			activePods: []*v1.Pod{},
 		},
 	}
 
@@ -2268,14 +2144,14 @@ func TestGetTopologyHints(t *testing.T) {
 			containerRuntime: mockRuntimeService{
 				err: nil,
 			},
-			activePods:        func() []*v1.Pod { return testCase.activePods },
+			activePods:        func() []*v1.Pod { return nil },
 			podStatusProvider: mockPodStatusProvider{},
 		}
 		mgr.sourcesReady = &sourcesReadyStub{}
 		mgr.state.SetMachineState(testCase.machineState.Clone())
 		mgr.state.SetMemoryAssignments(testCase.assignments.Clone())
 
-		pod := getPod("fakePod2", "fakeContainer1", requirementsGuaranteed)
+		pod := getPod("fakePod1", "fakeContainer1", requirementsGuaranteed)
 		container := &pod.Spec.Containers[0]
 		hints := mgr.GetTopologyHints(pod, container)
 		if !reflect.DeepEqual(hints, testCase.expectedHints) {