remove duplicated import and wrong alias name of api package
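The commit collapses two imports of the same package, "k8s.io/api/core/v1" and its redundant clientv1 alias, into a single import, and rewrites every clientv1.* reference to v1.*. A minimal standalone sketch of the shape the code converges on (hypothetical program, not a file from this commit):

    // Sketch only: one import of k8s.io/api/core/v1 under its package name v1
    // covers the ObjectReference uses that previously went through the
    // redundant clientv1 alias.
    package main

    import (
        "fmt"

        "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/types"
    )

    func main() {
        nodeName := "node-1" // hypothetical node name, for illustration
        nodeRef := &v1.ObjectReference{
            Kind: "Node",
            Name: nodeName,
            UID:  types.UID(nodeName),
        }
        fmt.Println(nodeRef.Kind, nodeRef.Name, nodeRef.UID)
    }
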
@@ -26,7 +26,6 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/kubernetes/pkg/kubelet/events"
@@ -73,7 +72,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 	}
 
 	// Using ObjectReference for events as the node maybe not cached; refer to #42701 for detail.
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: cm.nodeInfo.Name,
 		UID: types.UID(cm.nodeInfo.Name),

@@ -25,7 +25,6 @@ import (
 	"time"
 
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -88,7 +87,7 @@ func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*v1.Pod)
 
 func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) {
 	eventBroadcaster := record.NewBroadcaster()
-	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "kubelet"}))
+	config := NewPodConfig(mode, eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet"}))
 	channel := config.Channel(TestSource)
 	ch := config.Updates()
 	return channel, ch, config

@@ -25,7 +25,6 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -68,7 +67,7 @@ type managerImpl struct {
 	// captures when a node condition was last observed based on a threshold being met
 	nodeConditionsLastObservedAt nodeConditionsObservedAt
 	// nodeRef is a reference to the node
-	nodeRef *clientv1.ObjectReference
+	nodeRef *v1.ObjectReference
 	// used to record events about the node
 	recorder record.EventRecorder
 	// used to measure usage stats on system
@@ -100,7 +99,7 @@ func NewManager(
 	imageGC ImageGC,
 	containerGC ContainerGC,
 	recorder record.EventRecorder,
-	nodeRef *clientv1.ObjectReference,
+	nodeRef *v1.ObjectReference,
 	clock clock.Clock) (Manager, lifecycle.PodAdmitHandler) {
 	manager := &managerImpl{
 		clock: clock,

@@ -21,7 +21,6 @@ import (
 	"time"
 
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
@@ -202,7 +201,7 @@ func TestMemoryPressure(t *testing.T) {
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 	imageGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 	config := Config{
 		MaxPodGracePeriodSeconds: 5,
@@ -420,7 +419,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 	config := Config{
 		MaxPodGracePeriodSeconds: 5,
@@ -619,7 +618,7 @@ func TestMinReclaim(t *testing.T) {
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 	config := Config{
 		MaxPodGracePeriodSeconds: 5,
@@ -760,7 +759,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 	imageGcFree := resource.MustParse("700Mi")
 	diskGC := &mockDiskGC{imageBytesFreed: imageGcFree.Value(), err: nil}
-	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 	config := Config{
 		MaxPodGracePeriodSeconds: 5,
@@ -957,7 +956,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 	config := Config{
 		MaxPodGracePeriodSeconds: 5,
@@ -1159,7 +1158,7 @@ func TestCriticalPodsAreNotEvicted(t *testing.T) {
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: "",
 	}
 
@@ -1292,7 +1291,7 @@ func TestAllocatableMemoryPressure(t *testing.T) {
 	diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
 	nodeProvider := newMockNodeProvider(v1.ResourceList{v1.ResourceMemory: *quantityMustParse("2Gi")})
 	diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
-	nodeRef := &clientv1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
+	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
 
 	config := Config{
 		MaxPodGracePeriodSeconds: 5,

@@ -25,7 +25,6 @@ import (
 
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -86,7 +85,7 @@ type realImageGCManager struct {
 	recorder record.EventRecorder
 
 	// Reference to this node.
-	nodeRef *clientv1.ObjectReference
+	nodeRef *v1.ObjectReference
 
 	// Track initialization
 	initialized bool
@@ -129,7 +128,7 @@ type imageRecord struct {
 	size int64
 }
 
-func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *clientv1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
+func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
 	// Validate policy.
 	if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
 		return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)

@@ -37,7 +37,6 @@ import (
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -409,7 +408,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dep
 	// TODO: get the real node object of ourself,
 	// and use the real node name and UID.
 	// TODO: what is namespace for node?
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: string(nodeName),
 		UID: types.UID(nodeName),
@@ -956,7 +955,7 @@ type Kubelet struct {
 	// Indicates that the node initialization happens in an external cloud controller
 	externalCloudProvider bool
 	// Reference to this node.
-	nodeRef *clientv1.ObjectReference
+	nodeRef *v1.ObjectReference
 
 	// Container runtime.
 	containerRuntime kubecontainer.Runtime

@@ -29,7 +29,6 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -233,7 +232,7 @@ func newTestKubeletWithImageList(
 	kubelet.livenessManager = proberesults.NewManager()
 
 	kubelet.containerManager = cm.NewStubContainerManager()
-	fakeNodeRef := &clientv1.ObjectReference{
+	fakeNodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: testKubeletHostname,
 		UID: types.UID(testKubeletHostname),
@@ -263,7 +262,7 @@ func newTestKubeletWithImageList(
 	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
 	volumeStatsAggPeriod := time.Second * 10
 	kubelet.resourceAnalyzer = stats.NewResourceAnalyzer(kubelet, volumeStatsAggPeriod, kubelet.containerRuntime)
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: string(kubelet.nodeName),
 		UID: types.UID(kubelet.nodeName),

@@ -24,7 +24,6 @@ import (
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
@@ -117,7 +116,7 @@ func TestRunOnce(t *testing.T) {
 	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
 	volumeStatsAggPeriod := time.Second * 10
 	kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.containerRuntime)
-	nodeRef := &clientv1.ObjectReference{
+	nodeRef := &v1.ObjectReference{
 		Kind: "Node",
 		Name: string(kb.nodeName),
 		UID: types.UID(kb.nodeName),
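
Every hunk above follows the same two-step pattern: delete the aliased duplicate import and point the remaining references at the canonical v1 name. The duplicated import was never a compile error, only noise, since Go accepts the same import path under more than one name, which is why the pre-cleanup files still built. A minimal sketch of that language rule (hypothetical standalone program, unrelated to the kubelet code):

    package main

    import (
        "fmt"
        f "fmt" // same path as the line above under a second name: legal, but redundant
    )

    func main() {
        fmt.Println("via fmt")
        f.Println("via the redundant alias")
    }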