Mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-10-31 10:18:13 +00:00)

Use the assert/require package in kubelet unit tests

This reduces the lines of code and improves readability.
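The change applies one pattern throughout the kubelet test files: hand-rolled if / t.Errorf / t.Fatalf checks are replaced with testify's assert (which records a failure and lets the test keep running) and require (which stops the test immediately on failure). A minimal before/after sketch of that pattern, using an invented addTwo helper rather than any kubelet code:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// addTwo is an invented helper used only for this illustration.
func addTwo(a, b int) (int, error) { return a + b, nil }

func TestAddTwo(t *testing.T) {
	got, err := addTwo(1, 2)

	// Old style: manual checks with hand-written failure messages.
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got != 3 {
		t.Errorf("expected 3, got %d", got)
	}

	// New style: require aborts the test on failure, assert records it and continues.
	require.NoError(t, err)
	assert.Equal(t, 3, got)
}

The diff hunks below show the same substitution applied to the real kubelet tests.
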
@@ -20,6 +20,9 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"k8s.io/apimachinery/pkg/types"
@@ -194,11 +197,10 @@ func TestGetContainerInfo(t *testing.T) {
 		fakeRuntime.PodList = tc.podList
 
 		stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUid, tc.requestedContainerName, cadvisorReq)
-		if err != tc.expectedError {
-			t.Errorf("test '%s' failed: expected error %#v, got %#v", tc.name, tc.expectedError, err)
-		}
-		if tc.expectStats && stats == nil {
-			t.Fatalf("test '%s' failed: stats should not be nil", tc.name)
+		assert.Equal(t, tc.expectedError, err)
+		if tc.expectStats {
+			require.NotNil(t, stats)
 		}
 		mockCadvisor.AssertExpectations(t)
 	}
@@ -219,9 +221,7 @@ func TestGetRawContainerInfoRoot(t *testing.T) {
 	mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
 
 	_, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, false)
-	if err != nil {
-		t.Errorf("unexpected error: %v", err)
-	}
+	assert.NoError(t, err)
 	mockCadvisor.AssertExpectations(t)
 }
 
@@ -247,12 +247,8 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) {
 	mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
 
 	result, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, true)
-	if err != nil {
-		t.Errorf("unexpected error: %v", err)
-	}
-	if len(result) != 2 {
-		t.Errorf("Expected 2 elements, received: %#v", result)
-	}
+	assert.NoError(t, err)
+	assert.Len(t, result, 2)
 	mockCadvisor.AssertExpectations(t)
 }
 
@@ -282,11 +278,7 @@ func TestHasDedicatedImageFs(t *testing.T) {
 		mockCadvisor.On("ImagesFsInfo").Return(testCase.imageFsInfo, nil)
 		mockCadvisor.On("RootFsInfo").Return(testCase.rootFsInfo, nil)
 		actual, err := kubelet.HasDedicatedImageFs()
-		if err != nil {
-			t.Errorf("case: %s, unexpected error: %v", testName, err)
-		}
-		if actual != testCase.expected {
-			t.Errorf("case: %s, expected: %v, actual: %v", testName, testCase.expected, actual)
-		}
+		assert.NoError(t, err, "test [%s]", testName)
+		assert.Equal(t, testCase.expected, actual, "test [%s]", testName)
 	}
 }

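A detail worth noting in the TestHasDedicatedImageFs loop above: the extra arguments passed after the expected and actual values are treated by testify as a format string plus arguments and are printed only when the assertion fails, which preserves the per-case context the old t.Errorf messages carried. A small sketch of the same idea over an invented table:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSquares(t *testing.T) {
	// Invented table, used only to illustrate the trailing message arguments.
	cases := map[string]struct{ in, want int }{
		"one": {in: 1, want: 1},
		"two": {in: 2, want: 4},
	}
	for name, tc := range cases {
		got := tc.in * tc.in
		// "test [%s]" and name become the failure message, as in the kubelet tests.
		assert.Equal(t, tc.want, got, "test [%s]", name)
	}
}
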
@@ -21,6 +21,9 @@ import (
 	"os"
 	"path/filepath"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestKubeletDirs(t *testing.T) {
@@ -33,57 +36,39 @@ func TestKubeletDirs(t *testing.T) {
 
 	got = kubelet.getPodsDir()
 	exp = filepath.Join(root, "pods")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPluginsDir()
 	exp = filepath.Join(root, "plugins")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPluginDir("foobar")
 	exp = filepath.Join(root, "plugins/foobar")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPodDir("abc123")
 	exp = filepath.Join(root, "pods/abc123")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPodVolumesDir("abc123")
 	exp = filepath.Join(root, "pods/abc123/volumes")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPodVolumeDir("abc123", "plugin", "foobar")
 	exp = filepath.Join(root, "pods/abc123/volumes/plugin/foobar")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPodPluginsDir("abc123")
 	exp = filepath.Join(root, "pods/abc123/plugins")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPodPluginDir("abc123", "foobar")
 	exp = filepath.Join(root, "pods/abc123/plugins/foobar")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 
 	got = kubelet.getPodContainerDir("abc123", "def456")
 	exp = filepath.Join(root, "pods/abc123/containers/def456")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, exp, got)
 }
 
 func TestKubeletDirsCompat(t *testing.T) {
@@ -91,91 +76,37 @@ func TestKubeletDirsCompat(t *testing.T) {
 	defer testKubelet.Cleanup()
 	kubelet := testKubelet.kubelet
 	root := kubelet.rootDirectory
-	if err := os.MkdirAll(root, 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
+	require.NoError(t, os.MkdirAll(root, 0750), "can't mkdir(%q)", root)
 
-	var exp, got string
-
 	// Old-style pod dir.
-	if err := os.MkdirAll(fmt.Sprintf("%s/oldpod", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/oldpod", root), 0750), "can't mkdir(%q)", root)
 	// New-style pod dir.
-	if err := os.MkdirAll(fmt.Sprintf("%s/pods/newpod", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/pods/newpod", root), 0750), "can't mkdir(%q)", root)
	// Both-style pod dir.
-	if err := os.MkdirAll(fmt.Sprintf("%s/bothpod", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
-	if err := os.MkdirAll(fmt.Sprintf("%s/pods/bothpod", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/bothpod", root), 0750), "can't mkdir(%q)", root)
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/pods/bothpod", root), 0750), "can't mkdir(%q)", root)
 
-	got = kubelet.getPodDir("oldpod")
-	exp = filepath.Join(root, "oldpod")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
-
-	got = kubelet.getPodDir("newpod")
-	exp = filepath.Join(root, "pods/newpod")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
-
-	got = kubelet.getPodDir("bothpod")
-	exp = filepath.Join(root, "pods/bothpod")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
-
-	got = kubelet.getPodDir("neitherpod")
-	exp = filepath.Join(root, "pods/neitherpod")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, filepath.Join(root, "oldpod"), kubelet.getPodDir("oldpod"))
+	assert.Equal(t, filepath.Join(root, "pods/newpod"), kubelet.getPodDir("newpod"))
+	assert.Equal(t, filepath.Join(root, "pods/bothpod"), kubelet.getPodDir("bothpod"))
+	assert.Equal(t, filepath.Join(root, "pods/neitherpod"), kubelet.getPodDir("neitherpod"))
 
 	root = kubelet.getPodDir("newpod")
 
 	// Old-style container dir.
-	if err := os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750), "can't mkdir(%q)", root)
 	// New-style container dir.
-	if err := os.MkdirAll(fmt.Sprintf("%s/containers/newctr", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/containers/newctr", root), 0750), "can't mkdir(%q)", root)
 	// Both-style container dir.
-	if err := os.MkdirAll(fmt.Sprintf("%s/bothctr", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
-	if err := os.MkdirAll(fmt.Sprintf("%s/containers/bothctr", root), 0750); err != nil {
-		t.Fatalf("can't mkdir(%q): %s", root, err)
-	}
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/bothctr", root), 0750), "can't mkdir(%q)", root)
+	require.NoError(t, os.MkdirAll(fmt.Sprintf("%s/containers/bothctr", root), 0750), "can't mkdir(%q)", root)
 
-	got = kubelet.getPodContainerDir("newpod", "oldctr")
-	exp = filepath.Join(root, "oldctr")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
-
-	got = kubelet.getPodContainerDir("newpod", "newctr")
-	exp = filepath.Join(root, "containers/newctr")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
-
-	got = kubelet.getPodContainerDir("newpod", "bothctr")
-	exp = filepath.Join(root, "containers/bothctr")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
-
-	got = kubelet.getPodContainerDir("newpod", "neitherctr")
-	exp = filepath.Join(root, "containers/neitherctr")
-	if got != exp {
-		t.Errorf("expected %q', got %q", exp, got)
-	}
+	assert.Equal(t, filepath.Join(root, "oldctr"), kubelet.getPodContainerDir("newpod", "oldctr"))
+	assert.Equal(t, filepath.Join(root, "containers/newctr"), kubelet.getPodContainerDir("newpod", "newctr"))
+	assert.Equal(t, filepath.Join(root, "containers/bothctr"), kubelet.getPodContainerDir("newpod", "bothctr"))
+	assert.Equal(t, filepath.Join(root, "containers/neitherctr"), kubelet.getPodContainerDir("newpod", "neitherctr"))
 }

@@ -19,10 +19,11 @@ package kubelet
 import (
 	"fmt"
 	"net"
-	"reflect"
 	"strings"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/util/bandwidth"
@@ -61,10 +62,10 @@ func TestNodeIPParam(t *testing.T) {
 	for _, test := range tests {
 		kubelet.nodeIP = net.ParseIP(test.nodeIP)
 		err := kubelet.validateNodeIP()
-		if err != nil && test.success {
-			t.Errorf("Test: %s, expected no error but got: %v", test.testName, err)
-		} else if err == nil && !test.success {
-			t.Errorf("Test: %s, expected an error", test.testName)
+		if test.success {
+			assert.NoError(t, err, "test %s", test.testName)
+		} else {
+			assert.Error(t, err, fmt.Sprintf("test %s", test.testName))
 		}
 	}
 }
@@ -103,16 +104,9 @@ func TestParseResolvConf(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	for i, tc := range testCases {
 		ns, srch, err := kubelet.parseResolvConf(strings.NewReader(tc.data))
-		if err != nil {
-			t.Errorf("expected success, got %v", err)
-			continue
-		}
-		if !reflect.DeepEqual(ns, tc.nameservers) {
-			t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
-		}
-		if !reflect.DeepEqual(srch, tc.searches) {
-			t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
-		}
+		require.NoError(t, err)
+		assert.EqualValues(t, tc.nameservers, ns, "test case [%d]: name servers", i)
+		assert.EqualValues(t, tc.searches, srch, "test case [%d] searches", i)
 	}
 }
 
@@ -179,17 +173,11 @@ func TestComposeDNSSearch(t *testing.T) {
 
 	for i, tc := range testCases {
 		dnsSearch := kubelet.formDNSSearch(tc.hostNames, pod)
-
-		if !reflect.DeepEqual(dnsSearch, tc.resultSearch) {
-			t.Errorf("[%d] expected search line %#v, got %#v", i, tc.resultSearch, dnsSearch)
-		}
+		assert.EqualValues(t, tc.resultSearch, dnsSearch, "test [%d]", i)
 
 		for _, expectedEvent := range tc.events {
 			expected := fmt.Sprintf("%s %s %s", v1.EventTypeWarning, "DNSSearchForming", expectedEvent)
 			event := fetchEvent(recorder)
-			if event != expected {
-				t.Errorf("[%d] expected event '%s', got '%s", i, expected, event)
-			}
+			assert.Equal(t, expected, event, "test [%d]", i)
 		}
 	}
 }
@@ -268,12 +256,8 @@ func TestCleanupBandwidthLimits(t *testing.T) {
 		}
 
 		err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
-		if err != nil {
-			t.Errorf("unexpected error: %v (%s)", test.name, err)
-		}
-		if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
-			t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
-		}
+		assert.NoError(t, err, "test [%s]", test.name)
+		assert.EqualValues(t, test.expectResetCIDRs, shaper.ResetCIDRs, "test[%s]", test.name)
 	}
 }
 
@@ -293,8 +277,6 @@ func TestGetIPTablesMark(t *testing.T) {
 	}
 	for _, tc := range tests {
 		res := getIPTablesMark(tc.bit)
-		if res != tc.expect {
-			t.Errorf("getIPTablesMark output unexpected result: %v when input bit is %d. Expect result: %v", res, tc.bit, tc.expect)
-		}
+		assert.Equal(t, tc.expect, res, "input %d", tc.bit)
 	}
 }

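The TestParseResolvConf and TestCleanupBandwidthLimits hunks above replace reflect.DeepEqual with assert.EqualValues rather than assert.Equal. EqualValues also passes when the two values are equal after a type conversion, which is convenient when a named slice type is compared against a plain literal; Equal requires the static types to match exactly. A hedged sketch with made-up types, not taken from the kubelet code:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// searchList is an invented named type, used only for this illustration.
type searchList []string

func TestEqualValues(t *testing.T) {
	got := searchList{"svc.cluster.local", "cluster.local"}
	want := []string{"svc.cluster.local", "cluster.local"}

	// Passes: the values are equal once searchList is converted to []string.
	assert.EqualValues(t, want, got)

	// assert.Equal(t, want, got) would fail here, because the static types differ.
}
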
@@ -19,13 +19,15 @@ package kubelet
 import (
 	"encoding/json"
 	"fmt"
-	"reflect"
 	goruntime "runtime"
 	"sort"
 	"strconv"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
@@ -153,9 +155,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
 
 	// Make kubelet report that it has sufficient disk space.
-	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
-		t.Fatalf("can't update disk space manager: %v", err)
-	}
+	require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))
 
 	expectedNode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -227,44 +227,25 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 	}
 
 	kubelet.updateRuntimeUp()
-	if err := kubelet.updateNodeStatus(); err != nil {
-		t.Errorf("unexpected error: %v", err)
-	}
+	assert.NoError(t, kubelet.updateNodeStatus())
 	actions := kubeClient.Actions()
-	if len(actions) != 2 {
-		t.Fatalf("unexpected actions: %v", actions)
-	}
-	if !actions[1].Matches("patch", "nodes") || actions[1].GetSubresource() != "status" {
-		t.Fatalf("unexpected actions: %v", actions)
-	}
+	require.Len(t, actions, 2)
+	require.True(t, actions[1].Matches("patch", "nodes"))
+	require.Equal(t, actions[1].GetSubresource(), "status")
 	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
-	if err != nil {
-		t.Fatalf("can't apply node status patch: %v", err)
-	}
+	assert.NoError(t, err)
 	for i, cond := range updatedNode.Status.Conditions {
-		if cond.LastHeartbeatTime.IsZero() {
-			t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
-		}
-		if cond.LastTransitionTime.IsZero() {
-			t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
-		}
+		assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
+		assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition  is zero", cond.Type)
 		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
 		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
 	}
 
 	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
-	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
-		t.Errorf("unexpected node condition order. NodeReady should be last.")
-	}
-
-	if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
-		t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
-	} else {
-		if !apiequality.Semantic.DeepEqual(expectedNode, updatedNode) {
-			t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
-		}
-	}
+	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type, "NotReady should be last")
+	assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus)
+	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
 
 }
 
 func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
@@ -291,9 +272,8 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
 	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
 
 	// Make Kubelet report that it has sufficient disk space.
-	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
-		t.Fatalf("can't update disk space manager: %v", err)
-	}
+	err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100)
+	require.NoError(t, err, "update the disk space manager")
 
 	kubelet.outOfDiskTransitionFrequency = 10 * time.Second
 
@@ -307,40 +287,27 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
 	}
 
 	kubelet.updateRuntimeUp()
-	if err := kubelet.updateNodeStatus(); err != nil {
-		t.Errorf("unexpected error: %v", err)
-	}
+	assert.NoError(t, kubelet.updateNodeStatus())
 	actions := kubeClient.Actions()
-	if len(actions) != 2 {
-		t.Fatalf("unexpected actions: %v", actions)
-	}
-	// StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error)
-	if !actions[1].Matches("patch", "nodes") || actions[1].GetSubresource() != "status" {
-		t.Fatalf("unexpected actions: %v", actions)
-	}
+	require.Len(t, actions, 2)
+	require.True(t, actions[1].Matches("patch", "nodes"))
+	require.Equal(t, "status", actions[1].GetSubresource())
 	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
-	if err != nil {
-		t.Fatalf("can't apply node status patch: %v", err)
-	}
+	assert.NoError(t, err, "apply the node status patch")
 
 	var oodCondition v1.NodeCondition
 	for i, cond := range updatedNode.Status.Conditions {
-		if cond.LastHeartbeatTime.IsZero() {
-			t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
-		}
-		if cond.LastTransitionTime.IsZero() {
-			t.Errorf("unexpected zero last transition timestamp for %v condition", cond.Type)
-		}
+		assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
+		assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition  is zero", cond.Type)
 		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
 		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
 		if cond.Type == v1.NodeOutOfDisk {
 			oodCondition = updatedNode.Status.Conditions[i]
 		}
 	}
-
-	if !reflect.DeepEqual(expectedNodeOutOfDiskCondition, oodCondition) {
-		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNodeOutOfDiskCondition, oodCondition))
-	}
+	assert.EqualValues(t, expectedNodeOutOfDiskCondition, oodCondition)
 }
 
 func TestUpdateExistingNodeStatus(t *testing.T) {
@@ -424,9 +391,8 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
 
 	// Make kubelet report that it is out of disk space.
-	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100); err != nil {
-		t.Fatalf("can't update disk space manager: %v", err)
-	}
+	err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 50, 50, 100, 100)
+	require.NoError(t, err, "update the disk space manager")
 
 	expectedNode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -508,41 +474,31 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 	}
 
 	kubelet.updateRuntimeUp()
-	if err := kubelet.updateNodeStatus(); err != nil {
-		t.Errorf("unexpected error: %v", err)
-	}
+	assert.NoError(t, kubelet.updateNodeStatus())
 	actions := kubeClient.Actions()
-	if len(actions) != 2 {
-		t.Errorf("unexpected actions: %v", actions)
-	}
-	patchAction, ok := actions[1].(core.PatchActionImpl)
-	if !ok {
-		t.Errorf("unexpected action type.  expected PatchActionImpl, got %#v", actions[1])
-	}
+	assert.Len(t, actions, 2)
+	assert.IsType(t, core.PatchActionImpl{}, actions[1])
+	patchAction := actions[1].(core.PatchActionImpl)
 	updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
-	if !ok {
-		t.Fatalf("can't apply node status patch: %v", err)
-	}
+	require.NoError(t, err)
 	for i, cond := range updatedNode.Status.Conditions {
-		// Expect LastProbeTime to be updated to Now, while LastTransitionTime to be the same.
-		if old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; reflect.DeepEqual(cond.LastHeartbeatTime.Rfc3339Copy().UTC(), old) {
-			t.Errorf("Condition %v LastProbeTime: expected \n%v\n, got \n%v", cond.Type, metav1.Now(), old)
-		}
-		if got, want := cond.LastTransitionTime.Rfc3339Copy().UTC(), metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time; !reflect.DeepEqual(got, want) {
-			t.Errorf("Condition %v LastTransitionTime: expected \n%#v\n, got \n%#v", cond.Type, want, got)
-		}
+		old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
+		// Expect LastHearbeat to be updated to Now, while LastTransitionTime to be the same.
+		assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
+		assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
 		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
 		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
 	}
 
 	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
-	if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
-		t.Errorf("unexpected node condition order. NodeReady should be last.")
-	}
-
-	if !apiequality.Semantic.DeepEqual(expectedNode, updatedNode) {
-		t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
-	}
+	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
+		"NodeReady should be the last condition")
+	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
 }
 
 func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
@@ -670,25 +626,18 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
 		}
 
 		// Make kubelet report that it has sufficient disk space
-		if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100); err != nil {
-			t.Fatalf("can't update disk space manager: %v", err)
-		}
+		err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, tc.rootFsAvail, tc.dockerFsAvail, 100, 100)
+		require.NoError(t, err, "can't update disk space manager")
+		assert.NoError(t, kubelet.updateNodeStatus())
 
-		if err := kubelet.updateNodeStatus(); err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
 		actions := kubeClient.Actions()
-		if len(actions) != 2 {
-			t.Errorf("%d. unexpected actions: %v", tcIdx, actions)
-		}
-		patchAction, ok := actions[1].(core.PatchActionImpl)
-		if !ok {
-			t.Errorf("%d. unexpected action type.  expected PatchActionImpl, got %#v", tcIdx, actions[1])
-		}
+		assert.Len(t, actions, 2, "test [%d]", tcIdx)
+		assert.IsType(t, core.PatchActionImpl{}, actions[1])
+		patchAction := actions[1].(core.PatchActionImpl)
 		updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
-		if err != nil {
-			t.Fatalf("can't apply node status patch: %v", err)
-		}
+		require.NoError(t, err, "can't apply node status patch")
 		kubeClient.ClearActions()
 
 		var oodCondition v1.NodeCondition
@@ -697,10 +646,7 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
 				oodCondition = updatedNode.Status.Conditions[i]
 			}
 		}
-
-		if !reflect.DeepEqual(tc.expected, oodCondition) {
-			t.Errorf("%d.\nunexpected objects: %s", tcIdx, diff.ObjectDiff(tc.expected, oodCondition))
-		}
+		assert.EqualValues(t, tc.expected, oodCondition)
 	}
 }
 
@@ -737,9 +683,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 	mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
 
 	// Make kubelet report that it has sufficient disk space.
-	if err := updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100); err != nil {
-		t.Fatalf("can't update disk space manager: %v", err)
-	}
+	require.NoError(t, updateDiskSpacePolicy(kubelet, mockCadvisor, 500, 500, 200, 200, 100, 100))
 
 	expectedNode := &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
@@ -814,40 +758,28 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 
 	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
 		kubeClient.ClearActions()
-		if err := kubelet.updateNodeStatus(); err != nil {
-			t.Errorf("unexpected error: %v", err)
-		}
+		assert.NoError(t, kubelet.updateNodeStatus())
 		actions := kubeClient.Actions()
-		if len(actions) != 2 {
-			t.Fatalf("unexpected actions: %v", actions)
-		}
-		if !actions[1].Matches("patch", "nodes") || actions[1].GetSubresource() != "status" {
-			t.Fatalf("unexpected actions: %v", actions)
-		}
+		require.Len(t, actions, 2)
+		require.True(t, actions[1].Matches("patch", "nodes"))
+		require.Equal(t, actions[1].GetSubresource(), "status")
 		updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
-		if err != nil {
-			t.Fatalf("can't apply node status patch: %v", err)
-		}
+		require.NoError(t, err, "can't apply node status patch")
 
 		for i, cond := range updatedNode.Status.Conditions {
-			if cond.LastHeartbeatTime.IsZero() {
-				t.Errorf("unexpected zero last probe timestamp")
-			}
-			if cond.LastTransitionTime.IsZero() {
-				t.Errorf("unexpected zero last transition timestamp")
-			}
+			assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
+			assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition  is zero", cond.Type)
 			updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
 			updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
 		}
 
 		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
 		lastIndex := len(updatedNode.Status.Conditions) - 1
-		if updatedNode.Status.Conditions[lastIndex].Type != v1.NodeReady {
-			t.Errorf("unexpected node condition order. NodeReady should be last.")
-		}
-		if updatedNode.Status.Conditions[lastIndex].Message == "" {
-			t.Errorf("unexpected empty condition message")
-		}
+		assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
+		assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
 		updatedNode.Status.Conditions[lastIndex].Message = ""
 		expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
 			Type:               v1.NodeReady,
@@ -856,9 +788,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 			LastHeartbeatTime:  metav1.Time{},
 			LastTransitionTime: metav1.Time{},
 		}
-		if !apiequality.Semantic.DeepEqual(expectedNode, updatedNode) {
-			t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
-		}
+		assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
 	}
 
 	// TODO(random-liu): Refactor the unit test to be table driven test.
@@ -936,13 +866,8 @@ func TestUpdateNodeStatusError(t *testing.T) {
 	kubelet := testKubelet.kubelet
 	// No matching node for the kubelet
 	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
-
-	if err := kubelet.updateNodeStatus(); err == nil {
-		t.Errorf("unexpected non error: %v", err)
-	}
-	if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
-		t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
-	}
+	assert.Error(t, kubelet.updateNodeStatus())
+	assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
 }
 
 func TestRegisterWithApiServer(t *testing.T) {
@@ -998,7 +923,7 @@ func TestRegisterWithApiServer(t *testing.T) {
 	}()
 	select {
 	case <-time.After(wait.ForeverTestTimeout):
-		t.Errorf("timed out waiting for registration")
+		assert.Fail(t, "timed out waiting for registration")
 	case <-done:
 		return
 	}
@@ -1153,43 +1078,30 @@ func TestTryRegisterWithApiServer(t *testing.T) {
 		})
 
 		result := kubelet.tryRegisterWithApiServer(tc.newNode)
-		if e, a := tc.expectedResult, result; e != a {
-			t.Errorf("%v: unexpected result; expected %v got %v", tc.name, e, a)
-			continue
-		}
+		require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
 
 		actions := kubeClient.Actions()
-		if e, a := tc.expectedActions, len(actions); e != a {
-			t.Errorf("%v: unexpected number of actions, expected %v, got %v", tc.name, e, a)
-		}
+		assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)
 
 		if tc.testSavedNode {
 			var savedNode *v1.Node
-			var ok bool
 
 			t.Logf("actions: %v: %+v", len(actions), actions)
 			action := actions[tc.savedNodeIndex]
 			if action.GetVerb() == "create" {
 				createAction := action.(core.CreateAction)
-				savedNode, ok = createAction.GetObject().(*v1.Node)
-				if !ok {
-					t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, createAction.GetObject())
-					continue
-				}
+				obj := createAction.GetObject()
+				require.IsType(t, &v1.Node{}, obj)
+				savedNode = obj.(*v1.Node)
 			} else if action.GetVerb() == "patch" {
 				patchAction := action.(core.PatchActionImpl)
 				var err error
 				savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
-				if err != nil {
-					t.Errorf("can't apply node status patch: %v", err)
-					continue
-				}
+				require.NoError(t, err)
 			}
 
 			actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[volumehelper.ControllerManagedAttachAnnotation])
-			if e, a := tc.savedNodeCMAD, actualCMAD; e != a {
-				t.Errorf("%v: unexpected attach-detach value on saved node; expected %v got %v", tc.name, e, a)
-			}
+			assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
 		}
 	}
 }

@@ -68,19 +68,13 @@ func TestListVolumesForPod(t *testing.T) {
 	podName := volumehelper.GetUniquePodName(pod)
 
 	volumesToReturn, volumeExsit := kubelet.ListVolumesForPod(types.UID(podName))
-	if !volumeExsit {
-		t.Errorf("Expected to find volumes for pod %q, but ListVolumesForPod find no volume", podName)
-	}
+	assert.True(t, volumeExsit, "expected to find volumes for pod %q", podName)
 
 	outerVolumeSpecName1 := "vol1"
-	if volumesToReturn[outerVolumeSpecName1] == nil {
-		t.Errorf("Value of map volumesToReturn is not expected to be nil, which key is : %s", outerVolumeSpecName1)
-	}
+	assert.NotNil(t, volumesToReturn[outerVolumeSpecName1], "key %s", outerVolumeSpecName1)
 
 	outerVolumeSpecName2 := "vol2"
-	if volumesToReturn[outerVolumeSpecName2] == nil {
-		t.Errorf("Value of map volumesToReturn is not expected to be nil, which key is : %s", outerVolumeSpecName2)
-	}
+	assert.NotNil(t, volumesToReturn[outerVolumeSpecName2], "key %s", outerVolumeSpecName2)
 
 }
 
@@ -154,18 +148,12 @@ func TestPodVolumesExist(t *testing.T) {
 	kubelet.podManager.SetPods(pods)
 	for _, pod := range pods {
 		err := kubelet.volumeManager.WaitForAttachAndMount(pod)
-		if err != nil {
-			t.Errorf("Expected success: %v", err)
-		}
+		assert.NoError(t, err)
 	}
 
 	for _, pod := range pods {
 		podVolumesExist := kubelet.podVolumesExist(pod.UID)
-		if !podVolumesExist {
-			t.Errorf(
-				"Expected to find volumes for pod %q, but podVolumesExist returned false",
-				pod.UID)
-		}
+		assert.True(t, podVolumesExist, "pod %q", pod.UID)
 	}
 }

@@ -19,6 +19,8 @@ package kubelet
 import (
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"k8s.io/client-go/pkg/api/v1"
 	"k8s.io/client-go/tools/record"
 	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
@@ -29,10 +31,7 @@ func TestBasic(t *testing.T) {
 	mockCadvisor := &cadvisortest.Fake{}
 	node := &v1.ObjectReference{}
 	oomWatcher := NewOOMWatcher(mockCadvisor, fakeRecorder)
-	err := oomWatcher.Start(node)
-	if err != nil {
-		t.Errorf("Should not have failed: %v", err)
-	}
+	assert.NoError(t, oomWatcher.Start(node))
 
 	// TODO: Improve this test once cadvisor exports events.EventChannel as an interface
 	// and thereby allow using a mock version of cadvisor.

Author: Yu-Ju Hong