	Merge pull request #37090 from humblec/iscsi-nodiskconf
Automatic merge from submit-queue (batch tested with PRs 35436, 37090, 38700)

Make iscsi pv/pvc aware of the NoDiskConflict feature. Since iSCSI is a `RWO, ROX` volume, we should report a conflict if more than one pod uses the same iSCSI LUN.

Signed-off-by: Humble Chirammal <hchiramm@redhat.com>
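The rule in plain terms: two pods that reference the same iSCSI target portal, IQN, and LUN may only be co-scheduled onto a node if both mount the volume read-only. A minimal sketch of a conflicting pair, assuming the `k8s.io/kubernetes/pkg/api/v1` import path used by this branch of the tree:

// Sketch only: constructs two pod specs that the updated NoDiskConflict
// predicate would treat as conflicting, since they share TargetPortal, IQN,
// and LUN and at least one of them mounts read-write.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	shared := v1.VolumeSource{
		ISCSI: &v1.ISCSIVolumeSource{
			TargetPortal: "127.0.0.1:3260",
			IQN:          "iqn.2014-12.server:storage.target01",
			FSType:       "ext4",
			Lun:          0,
			ReadOnly:     false, // a read-write mount; two of these conflict
		},
	}
	podA := v1.PodSpec{Volumes: []v1.Volume{{Name: "data", VolumeSource: shared}}}
	podB := v1.PodSpec{Volumes: []v1.Volume{{Name: "data", VolumeSource: shared}}}

	// Same IQN, LUN, and portal: only one of these pods can land on a node.
	fmt.Println(podA.Volumes[0].ISCSI.IQN == podB.Volumes[0].ISCSI.IQN) // true
}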
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"math/rand"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -109,7 +110,7 @@ type predicateMetadata struct {
 
 func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 	// fast path if there is no conflict checking targets.
-	if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil {
+	if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil && volume.ISCSI == nil {
 		return false
 	}
 
@@ -128,6 +129,26 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 			}
 		}
 
+		if volume.ISCSI != nil && existingVolume.ISCSI != nil {
+			iqn, lun, target := volume.ISCSI.IQN, volume.ISCSI.Lun, volume.ISCSI.TargetPortal
+			eiqn, elun, etarget := existingVolume.ISCSI.IQN, existingVolume.ISCSI.Lun, existingVolume.ISCSI.TargetPortal
+			if !strings.Contains(target, ":") {
+				target = target + ":3260"
+			}
+			if !strings.Contains(etarget, ":") {
+				etarget = etarget + ":3260"
+			}
+			lun1 := strconv.Itoa(int(lun))
+			elun1 := strconv.Itoa(int(elun))
+
+			// two ISCSI volumes are the same if they share the same iqn, lun and target. As iscsi volumes are of type
+			// RWO or ROX, we can permit only one RW mount. The same iscsi volume mounted by multiple pods
+			// conflicts unless all other pods mount it read-only.
+			if iqn == eiqn && lun1 == elun1 && target == etarget && !(volume.ISCSI.ReadOnly && existingVolume.ISCSI.ReadOnly) {
+				return true
+			}
+		}
+
 		if volume.RBD != nil && existingVolume.RBD != nil {
 			mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
 			emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
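The portal normalization above treats a portal with no explicit port as using 3260, the default iSCSI port, so `127.0.0.1` and `127.0.0.1:3260` compare equal. A standalone sketch of that behavior (note the `strings.Contains` check assumes IPv4 or hostname portals; an IPv6 literal always contains a colon):

// Sketch of the normalization logic extracted from the predicate: portals
// without an explicit port default to 3260 before comparison.
package main

import (
	"fmt"
	"strings"
)

func normalizePortal(target string) string {
	if !strings.Contains(target, ":") {
		target = target + ":3260"
	}
	return target
}

func main() {
	fmt.Println(normalizePortal("127.0.0.1"))                                      // 127.0.0.1:3260
	fmt.Println(normalizePortal("127.0.0.1") == normalizePortal("127.0.0.1:3260")) // true
}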
@@ -150,6 +171,7 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 // - GCE PD allows multiple mounts as long as they're all read-only
 // - AWS EBS forbids any two pods mounting the same volume ID
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
+// - ISCSI forbids if any two pods share at least same IQN, LUN and Target
 // TODO: migrate this into some per-volume specific code?
 func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	for _, v := range pod.Spec.Volumes {
@@ -738,6 +738,65 @@ func TestRBDDiskConflicts(t *testing.T) {
 	}
 }
 
+func TestISCSIDiskConflicts(t *testing.T) {
+	volState := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					ISCSI: &v1.ISCSIVolumeSource{
+						TargetPortal: "127.0.0.1:3260",
+						IQN:          "iqn.2014-12.server:storage.target01",
+						FSType:       "ext4",
+						Lun:          0,
+					},
+				},
+			},
+		},
+	}
+	volState2 := v1.PodSpec{
+		Volumes: []v1.Volume{
+			{
+				VolumeSource: v1.VolumeSource{
+					ISCSI: &v1.ISCSIVolumeSource{
+						TargetPortal: "127.0.0.2:3260",
+						IQN:          "iqn.2014-12.server:storage.target01",
+						FSType:       "ext4",
+						Lun:          1,
+					},
+				},
+			},
+		},
+	}
+	tests := []struct {
+		pod      *v1.Pod
+		nodeInfo *schedulercache.NodeInfo
+		isOk     bool
+		test     string
+	}{
+		{&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"},
+		{&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"},
+		{&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"},
+		{&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"},
+	}
+	expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict}
+
+	for _, test := range tests {
+		ok, reasons, err := NoDiskConflict(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+		if err != nil {
+			t.Errorf("%s: unexpected error: %v", test.test, err)
+		}
+		if !ok && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+			t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons)
+		}
+		if test.isOk && !ok {
+			t.Errorf("%s: expected ok, got none.  %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
+		}
+		if !test.isOk && ok {
+			t.Errorf("%s: expected no ok, got one.  %v %s %s", test.test, test.pod, test.nodeInfo, test.test)
+		}
+	}
+}
+
 func TestPodFitsSelector(t *testing.T) {
 	tests := []struct {
 		pod    *v1.Pod
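The new test can be run in isolation with `go test`'s `-run` filter; the package path below is an assumption based on where the scheduler predicates lived at this point in the tree and may differ in later releases:

go test ./plugin/pkg/scheduler/algorithm/predicates -run TestISCSIDiskConflicts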