Remove duplicated import and wrong alias name of the api package
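The change below touches every controller file that had imported k8s.io/api/core/v1 twice: once unaliased (as v1) and once under the redundant alias clientv1, with call sites split between the two names. A minimal sketch of the cleaned-up state (the package, variable, and component string here are illustrative, not taken from any one of the touched files):

package example

import (
	// Before this change the same package was imported twice in each file:
	//
	//     "k8s.io/api/core/v1"
	//     clientv1 "k8s.io/api/core/v1"
	//
	// The commit drops the redundant clientv1 alias and keeps only the line below.
	"k8s.io/api/core/v1"
)

// Every former clientv1.* reference now uses the v1 name, for example the
// event source handed to each controller's recorder.
var exampleSource = v1.EventSource{Component: "example-controller"}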
@@ -23,7 +23,6 @@ import (
 	"github.com/golang/glog"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -80,7 +79,7 @@ func NewCloudNodeController(
 	nodeStatusUpdateFrequency time.Duration) *CloudNodeController {

 	eventBroadcaster := record.NewBroadcaster()
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloudcontrollermanager"})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloudcontrollermanager"})
 	eventBroadcaster.StartLogging(glog.Infof)
 	if kubeClient != nil {
 		glog.V(0).Infof("Sending events to api server.")

@@ -26,7 +26,6 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"

-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -110,7 +109,7 @@ func TestNodeDeleted(t *testing.T) {
 		nodeInformer: factory.Core().V1().Nodes(),
 		cloud: &fakecloud.FakeCloud{Err: cloudprovider.InstanceNotFound},
 		nodeMonitorPeriod: 1 * time.Second,
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 		nodeStatusUpdateFrequency: 1 * time.Second,
 	}
 	eventBroadcaster.StartLogging(glog.Infof)
@@ -190,7 +189,7 @@ func TestNodeInitialized(t *testing.T) {
 		nodeInformer: factory.Core().V1().Nodes(),
 		cloud: fakeCloud,
 		nodeMonitorPeriod: 1 * time.Second,
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 		nodeStatusUpdateFrequency: 1 * time.Second,
 	}
 	eventBroadcaster.StartLogging(glog.Infof)
@@ -261,7 +260,7 @@ func TestNodeIgnored(t *testing.T) {
 		nodeInformer: factory.Core().V1().Nodes(),
 		cloud: fakeCloud,
 		nodeMonitorPeriod: 5 * time.Second,
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 	}
 	eventBroadcaster.StartLogging(glog.Infof)

@@ -338,7 +337,7 @@ func TestGCECondition(t *testing.T) {
 		nodeInformer: factory.Core().V1().Nodes(),
 		cloud: fakeCloud,
 		nodeMonitorPeriod: 1 * time.Second,
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 	}
 	eventBroadcaster.StartLogging(glog.Infof)

@@ -434,7 +433,7 @@ func TestZoneInitialized(t *testing.T) {
 		nodeInformer: factory.Core().V1().Nodes(),
 		cloud: fakeCloud,
 		nodeMonitorPeriod: 5 * time.Second,
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 	}
 	eventBroadcaster.StartLogging(glog.Infof)

@@ -531,7 +530,7 @@ func TestNodeAddresses(t *testing.T) {
 		cloud: fakeCloud,
 		nodeMonitorPeriod: 5 * time.Second,
 		nodeStatusUpdateFrequency: 1 * time.Second,
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 	}
 	eventBroadcaster.StartLogging(glog.Infof)

@@ -650,7 +649,7 @@ func TestNodeProvidedIPAddresses(t *testing.T) {
 		cloud: fakeCloud,
 		nodeMonitorPeriod: 5 * time.Second,
 		nodeStatusUpdateFrequency: 1 * time.Second,
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cloud-controller-manager"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"}),
 	}
 	eventBroadcaster.StartLogging(glog.Infof)

@@ -38,7 +38,6 @@ import (
 	batchv1 "k8s.io/api/batch/v1"
 	batchv2alpha1 "k8s.io/api/batch/v2alpha1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -82,7 +81,7 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
 		jobControl: realJobControl{KubeClient: kubeClient},
 		sjControl: &realSJControl{KubeClient: kubeClient},
 		podControl: &realPodControl{KubeClient: kubeClient},
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cronjob-controller"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cronjob-controller"}),
 	}

 	return jm

@@ -25,7 +25,6 @@ import (

 	apps "k8s.io/api/apps/v1beta1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -134,10 +133,10 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
 	}
 	dsc := &DaemonSetsController{
 		kubeClient: kubeClient,
-		eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "daemonset-controller"}),
+		eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
-			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "daemon-set"}),
+			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemon-set"}),
 		},
 		crControl: controller.RealControllerRevisionControl{
 			KubeClient: kubeClient,

@@ -28,7 +28,6 @@ import (
 	"github.com/golang/glog"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -109,7 +108,7 @@ func NewDeploymentController(dInformer extensionsinformers.DeploymentInformer, r
 	}
 	dc := &DeploymentController{
 		client: client,
-		eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "deployment-controller"}),
+		eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
 	}
 	dc.rsControl = controller.RealRSControl{

@@ -23,7 +23,6 @@ import (

 	apps "k8s.io/api/apps/v1beta1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/api/extensions/v1beta1"
 	policy "k8s.io/api/policy/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -125,7 +124,7 @@ func NewDisruptionController(
 		recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"),
 		broadcaster: record.NewBroadcaster(),
 	}
-	dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "controllermanager"})
+	dc.recorder = dc.broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})

 	dc.getUpdater = func() updater { return dc.writePdbStatus }

@@ -25,7 +25,6 @@ import (

 	batch "k8s.io/api/batch/v1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -93,11 +92,11 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
-			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "job-controller"}),
+			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "job-controller"}),
 		},
 		expectations: controller.NewControllerExpectations(),
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "job"),
-		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "job-controller"}),
+		recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "job-controller"}),
 	}

 	jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -25,7 +25,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"

-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/record"

 	"k8s.io/api/core/v1"
@@ -66,7 +65,7 @@ func NewCloudCIDRAllocator(
 		cloud: gceCloud,
 		recorder: record.NewBroadcaster().NewRecorder(
 			api.Scheme,
-			clientv1.EventSource{Component: "cidrAllocator"}),
+			v1.EventSource{Component: "cidrAllocator"}),
 	}

 	glog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName())

@@ -27,7 +27,6 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
@@ -266,7 +265,7 @@ func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.Nod
 }

 func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
-	ref := &clientv1.ObjectReference{
+	ref := &v1.ObjectReference{
 		Kind: "Node",
 		Name: nodeName,
 		UID: types.UID(nodeUID),
@@ -277,7 +276,7 @@ func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype
 }

 func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_status string) {
-	ref := &clientv1.ObjectReference{
+	ref := &v1.ObjectReference{
 		Kind: "Node",
 		Name: node.Name,
 		UID: node.UID,

@@ -32,7 +32,6 @@ import (
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"

-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
@@ -218,7 +217,7 @@ func NewNodeController(
 	runTaintManager bool,
 	useTaintBasedEvictions bool) (*NodeController, error) {
 	eventBroadcaster := record.NewBroadcaster()
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "controllermanager"})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})
 	eventBroadcaster.StartLogging(glog.Infof)
 	if kubeClient != nil {
 		glog.V(0).Infof("Sending events to api server.")

@@ -22,7 +22,6 @@ import (
 	"sync"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -64,7 +63,7 @@ type rangeAllocator struct {
 // can initialize its CIDR map. NodeList is only nil in testing.
 func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
 	eventBroadcaster := record.NewBroadcaster()
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "cidrAllocator"})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"})
 	eventBroadcaster.StartLogging(glog.Infof)
 	if client != nil {
 		glog.V(0).Infof("Sending events to api server.")

@@ -31,7 +31,6 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"

-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/scheme"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/record"
@@ -153,7 +152,7 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration {
 // communicate with the API server.
 func NewNoExecuteTaintManager(c clientset.Interface) *NoExecuteTaintManager {
 	eventBroadcaster := record.NewBroadcaster()
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "controllermanager"})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "controllermanager"})
 	eventBroadcaster.StartLogging(glog.Infof)
 	if c != nil {
 		glog.V(0).Infof("Sending events to api server.")
@@ -417,22 +416,22 @@ func (tc *NoExecuteTaintManager) emitPodDeletionEvent(nsName types.NamespacedNam
 	if tc.recorder == nil {
 		return
 	}
-	ref := &clientv1.ObjectReference{
+	ref := &v1.ObjectReference{
 		Kind: "Pod",
 		Name: nsName.Name,
 		Namespace: nsName.Namespace,
 	}
-	tc.recorder.Eventf(ref, clientv1.EventTypeNormal, "TaintManagerEviction", "Marking for deletion Pod %s", nsName.String())
+	tc.recorder.Eventf(ref, v1.EventTypeNormal, "TaintManagerEviction", "Marking for deletion Pod %s", nsName.String())
 }

 func (tc *NoExecuteTaintManager) emitCancelPodDeletionEvent(nsName types.NamespacedName) {
 	if tc.recorder == nil {
 		return
 	}
-	ref := &clientv1.ObjectReference{
+	ref := &v1.ObjectReference{
 		Kind: "Pod",
 		Name: nsName.Name,
 		Namespace: nsName.Namespace,
 	}
-	tc.recorder.Eventf(ref, clientv1.EventTypeNormal, "TaintManagerEviction", "Cancelling deletion of Pod %s", nsName.String())
+	tc.recorder.Eventf(ref, v1.EventTypeNormal, "TaintManagerEviction", "Cancelling deletion of Pod %s", nsName.String())
 }

@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/watch"

-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/clock"
 	ref "k8s.io/client-go/tools/reference"

@@ -342,8 +341,8 @@ func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, su
 // FakeRecorder is used as a fake during testing.
 type FakeRecorder struct {
 	sync.Mutex
-	source clientv1.EventSource
-	Events []*clientv1.Event
+	source v1.EventSource
+	Events []*v1.Event
 	clock clock.Clock
 }

@@ -376,14 +375,14 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time,
 	}
 }

-func (f *FakeRecorder) makeEvent(ref *clientv1.ObjectReference, eventtype, reason, message string) *clientv1.Event {
+func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event {
 	t := metav1.Time{Time: f.clock.Now()}
 	namespace := ref.Namespace
 	if namespace == "" {
 		namespace = metav1.NamespaceDefault
 	}

-	clientref := clientv1.ObjectReference{
+	clientref := v1.ObjectReference{
 		Kind: ref.Kind,
 		Namespace: ref.Namespace,
 		Name: ref.Name,
@@ -393,7 +392,7 @@ func (f *FakeRecorder) makeEvent(ref *clientv1.ObjectReference, eventtype, reaso
 		FieldPath: ref.FieldPath,
 	}

-	return &clientv1.Event{
+	return &v1.Event{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
 			Namespace: namespace,
@@ -411,8 +410,8 @@ func (f *FakeRecorder) makeEvent(ref *clientv1.ObjectReference, eventtype, reaso
 // NewFakeRecorder returns a pointer to a newly constructed FakeRecorder.
 func NewFakeRecorder() *FakeRecorder {
 	return &FakeRecorder{
-		source: clientv1.EventSource{Component: "nodeControllerTest"},
-		Events: []*clientv1.Event{},
+		source: v1.EventSource{Component: "nodeControllerTest"},
+		Events: []*v1.Event{},
 		clock: clock.NewFakeClock(time.Now()),
 	}
 }

@@ -25,7 +25,6 @@ import (
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -113,7 +112,7 @@ func NewHorizontalController(
 	broadcaster := record.NewBroadcaster()
 	// TODO: remove the wrapper when every clients have moved to use the clientset.
 	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
-	recorder := broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "horizontal-pod-autoscaler"})
+	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})

 	hpaController := &HorizontalController{
 		replicaCalc: replicaCalc,

@@ -27,7 +27,6 @@ import (
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -435,11 +434,11 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 			Containers: []metricsapi.ContainerMetrics{
 				{
 					Name: "container",
-					Usage: clientv1.ResourceList{
-						clientv1.ResourceCPU: *resource.NewMilliQuantity(
+					Usage: v1.ResourceList{
+						v1.ResourceCPU: *resource.NewMilliQuantity(
 							int64(cpu),
 							resource.DecimalSI),
-						clientv1.ResourceMemory: *resource.NewQuantity(
+						v1.ResourceMemory: *resource.NewQuantity(
 							int64(1024*1024),
 							resource.BinarySI),
 					},
@@ -471,7 +470,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa

 		for i, level := range tc.reportedLevels {
 			podMetric := cmapi.MetricValue{
-				DescribedObject: clientv1.ObjectReference{
+				DescribedObject: v1.ObjectReference{
 					Kind: "Pod",
 					Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
 					Namespace: namespace,
@@ -509,7 +508,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa

 		metrics.Items = []cmapi.MetricValue{
 			{
-				DescribedObject: clientv1.ObjectReference{
+				DescribedObject: v1.ObjectReference{
 					Kind: matchedTarget.Object.Target.Kind,
 					APIVersion: matchedTarget.Object.Target.APIVersion,
 					Name: name,
@@ -559,7 +558,7 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
 		tc.Lock()
 		defer tc.Unlock()

-		obj := action.(core.CreateAction).GetObject().(*clientv1.Event)
+		obj := action.(core.CreateAction).GetObject().(*v1.Event)
 		if tc.verifyEvents {
 			switch obj.Reason {
 			case "SuccessfulRescale":

@@ -30,7 +30,6 @@ import (
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -332,11 +331,11 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 			Containers: []metricsapi.ContainerMetrics{
 				{
 					Name: "container",
-					Usage: clientv1.ResourceList{
-						clientv1.ResourceCPU: *resource.NewMilliQuantity(
+					Usage: v1.ResourceList{
+						v1.ResourceCPU: *resource.NewMilliQuantity(
 							int64(cpu),
 							resource.DecimalSI),
-						clientv1.ResourceMemory: *resource.NewQuantity(
+						v1.ResourceMemory: *resource.NewQuantity(
 							int64(1024*1024),
 							resource.BinarySI),
 					},
@@ -464,7 +463,7 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
 		tc.Lock()
 		defer tc.Unlock()

-		obj := action.(core.CreateAction).GetObject().(*clientv1.Event)
+		obj := action.(core.CreateAction).GetObject().(*v1.Event)
 		if tc.verifyEvents {
 			switch obj.Reason {
 			case "SuccessfulRescale":

@@ -26,7 +26,6 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -127,8 +126,8 @@ func (tc *legacyReplicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clien
 		for i := 0; i < numContainersPerPod; i++ {
 			podMetric.Containers[i] = metricsapi.ContainerMetrics{
 				Name: fmt.Sprintf("container%v", i),
-				Usage: clientv1.ResourceList{
-					clientv1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
+				Usage: v1.ResourceList{
+					v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
 						int64(resValue),
 						resource.DecimalSI),
 				},

@@ -24,7 +24,6 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -115,11 +114,11 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		for j, cpu := range containers {
 			cm := metricsapi.ContainerMetrics{
 				Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
-				Usage: clientv1.ResourceList{
-					clientv1.ResourceCPU: *resource.NewMilliQuantity(
+				Usage: v1.ResourceList{
+					v1.ResourceCPU: *resource.NewMilliQuantity(
 						cpu,
 						resource.DecimalSI),
-					clientv1.ResourceMemory: *resource.NewQuantity(
+					v1.ResourceMemory: *resource.NewQuantity(
 						int64(1024*1024),
 						resource.BinarySI),
 				},

@@ -24,7 +24,6 @@ import (

 	autoscaling "k8s.io/api/autoscaling/v2alpha1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -72,7 +71,7 @@ func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, name
 		podSum := int64(0)
 		missing := len(m.Containers) == 0
 		for _, c := range m.Containers {
-			resValue, found := c.Usage[clientv1.ResourceName(resource)]
+			resValue, found := c.Usage[v1.ResourceName(resource)]
 			if !found {
 				missing = true
 				glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)

@@ -24,7 +24,6 @@ import (

 	autoscalingv2 "k8s.io/api/autoscaling/v2alpha1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -158,8 +157,8 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,
 		for i := 0; i < numContainersPerPod; i++ {
 			podMetric.Containers[i] = metricsapi.ContainerMetrics{
 				Name: fmt.Sprintf("container%v", i),
-				Usage: clientv1.ResourceList{
-					clientv1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
+				Usage: v1.ResourceList{
+					v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
 						int64(resValue),
 						resource.DecimalSI),
 				},
@@ -194,7 +193,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,

 		for i, level := range tc.metric.levels {
 			podMetric := cmapi.MetricValue{
-				DescribedObject: clientv1.ObjectReference{
+				DescribedObject: v1.ObjectReference{
 					Kind: "Pod",
 					Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
 					Namespace: testNamespace,
@@ -224,7 +223,7 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset,

 		metrics.Items = []cmapi.MetricValue{
 			{
-				DescribedObject: clientv1.ObjectReference{
+				DescribedObject: v1.ObjectReference{
 					Kind: tc.metric.singleObject.Kind,
 					APIVersion: tc.metric.singleObject.APIVersion,
 					Name: name,

@@ -27,7 +27,6 @@ import (

 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/api/extensions/v1beta1"
 	extensions "k8s.io/api/extensions/v1beta1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -106,7 +105,7 @@ func NewReplicaSetController(rsInformer extensionsinformers.ReplicaSetInformer,
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
-			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "replicaset-controller"}),
+			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replicaset-controller"}),
 		},
 		burstReplicas: burstReplicas,
 		expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),

@@ -27,7 +27,6 @@ import (

 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -102,7 +101,7 @@ func NewReplicationManager(podInformer coreinformers.PodInformer, rcInformer cor
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
-			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "replication-controller"}),
+			Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "replication-controller"}),
 		},
 		burstReplicas: burstReplicas,
 		expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),

@@ -26,7 +26,6 @@ import (

 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -107,7 +106,7 @@ func New(
 ) (*ServiceController, error) {
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-	recorder := broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "service-controller"})
+	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "service-controller"})

 	if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 		metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter())

@@ -23,7 +23,6 @@ import (

 	apps "k8s.io/api/apps/v1beta1"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -88,7 +87,7 @@ func NewStatefulSetController(
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "statefulset"})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "statefulset"})

 	ssc := &StatefulSetController{
 		kubeClient: kubeClient,

@@ -25,7 +25,6 @@ import (

 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
@@ -135,7 +134,7 @@ func NewAttachDetachController(
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
-	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "attachdetach"})
+	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "attachdetach"})

 	adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
 	adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)

@@ -22,7 +22,6 @@ import (
 	"time"

 	"k8s.io/api/core/v1"
-	clientv1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -72,7 +71,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
 	if eventRecorder == nil {
 		broadcaster := record.NewBroadcaster()
 		broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(p.KubeClient.Core().RESTClient()).Events("")})
-		eventRecorder = broadcaster.NewRecorder(scheme.Scheme, clientv1.EventSource{Component: "persistentvolume-controller"})
+		eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "persistentvolume-controller"})
 	}

 	controller := &PersistentVolumeController{
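All of the constructors touched above share the same event-recorder setup; with the duplicate import gone it reads roughly as follows. This is a sketch only: the newRecorder helper, its kubeClient parameter, and the component argument are placeholders, assembled from the calls shown in the hunks above.

package example

import (
	"github.com/golang/glog"

	"k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder wires up an event broadcaster the way the controllers in this
// commit do: log events through glog, forward them to the API server when a
// client is available, and return a recorder stamped with the component name.
func newRecorder(kubeClient kubernetes.Interface, component string) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	if kubeClient != nil {
		glog.V(0).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.Core().RESTClient()).Events("")})
	}
	// EventSource now comes from the single v1 import rather than the removed
	// clientv1 alias.
	return eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})
}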