Introduce new kubelet volume manager
This commit adds a new volume manager in kubelet that synchronizes volume mount/unmount (and attach/detach, if the attach/detach controller is not enabled). This eliminates the race conditions between the pod creation loop and the orphaned volumes loops. It also removes unmount/detach from the `syncPod()` path, so volume cleanup never blocks the `syncPod` loop.
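The pieces in the diff below all follow the same desired-state/actual-state reconciler pattern: each manager keeps one cache of what should be true and one of what is true, and a single periodic loop triggers operations to converge them, so no other code path mutates mount state. A minimal, self-contained sketch of that pattern (every name here is illustrative only; the real machinery is cache.DesiredStateOfWorld, cache.ActualStateOfWorld, and the reconciler package in the hunks below):

// Illustrative sketch of the reconciler pattern, not the actual kubelet code.
package main

import (
	"fmt"
	"time"
)

type volumeState map[string]bool // volume name -> should be / is mounted

// reconcile compares what should be true with what is true and triggers
// mounts/unmounts. Because a single loop owns all state transitions, the
// races between the pod-creation loop and the orphaned-volumes loop go away.
func reconcile(desired, actual volumeState) {
	for vol := range desired {
		if !actual[vol] {
			fmt.Println("mount", vol) // placeholder for the real mount operation
			actual[vol] = true
		}
	}
	for vol := range actual {
		if !desired[vol] {
			fmt.Println("unmount", vol) // placeholder for the real unmount operation
			delete(actual, vol)
		}
	}
}

func main() {
	desired := volumeState{"vol-a": true}
	actual := volumeState{"vol-b": true}
	for i := 0; i < 3; i++ { // stands in for wait.Until(..., loopPeriod, stopCh)
		reconcile(desired, actual)
		time.Sleep(100 * time.Millisecond) // cf. reconcilerLoopPeriod below
	}
}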
@@ -16767,7 +16767,7 @@
 "items": {
 "$ref": "v1.UniqueVolumeName"
 },
-"description": "List of attachable volume devices in use (mounted) by the node."
+"description": "List of attachable volumes in use (mounted) by the node."
 }
 }
 },
@@ -2766,6 +2766,10 @@ Populated by the system when a graceful deletion is requested. Read-only. More i
 </tbody>
 </table>

+</div>
+<div class="sect2">
+<h3 id="_v1_uniquevolumename">v1.UniqueVolumeName</h3>
+
 </div>
 <div class="sect2">
 <h3 id="_unversioned_labelselector">unversioned.LabelSelector</h3>
@@ -2806,9 +2810,7 @@ Populated by the system when a graceful deletion is requested. Read-only. More i
 </tr>
 </tbody>
 </table>
-</div>
-<div class="sect2">
-<h3 id="_v1_uniquevolumename">v1.UniqueVolumeName</h3>
+
 </div>
 <div class="sect2">
 <h3 id="_v1_endpointsubset">v1.EndpointSubset</h3>
@@ -4755,7 +4757,7 @@ The resulting set of endpoints can be viewed as:<br>
 </tr>
 <tr>
 <td class="tableblock halign-left valign-top"><p class="tableblock">volumesInUse</p></td>
-<td class="tableblock halign-left valign-top"><p class="tableblock">List of attachable volume devices in use (mounted) by the node.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">List of attachable volumes in use (mounted) by the node.</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
 <td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_uniquevolumename">v1.UniqueVolumeName</a> array</p></td>
 <td class="tableblock halign-left valign-top"></td>
@@ -8103,7 +8105,7 @@ The resulting set of endpoints can be viewed as:<br>
 </div>
 <div id="footer">
 <div id="footer-text">
-Last updated 2016-06-06 17:05:06 UTC
+Last updated 2016-06-08 04:10:38 UTC
 </div>
 </div>
 </body>
@@ -1304,7 +1304,7 @@ message NodeStatus {
   // List of container images on this node
   repeated ContainerImage images = 8;

-  // List of attachable volume devices in use (mounted) by the node.
+  // List of attachable volumes in use (mounted) by the node.
   repeated string volumesInUse = 9;
 }

@@ -2386,7 +2386,7 @@ type NodeStatus struct {
 	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
 	// List of container images on this node
 	Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
-	// List of attachable volume devices in use (mounted) by the node.
+	// List of attachable volumes in use (mounted) by the node.
 	VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
 }

@@ -880,7 +880,7 @@ var map_NodeStatus = map[string]string{
 	"daemonEndpoints": "Endpoints of daemons running on the Node.",
 	"nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: http://releases.k8s.io/HEAD/docs/admin/node.md#node-info",
 	"images": "List of container images on this node",
-	"volumesInUse": "List of attachable volume devices in use (mounted) by the node.",
+	"volumesInUse": "List of attachable volumes in use (mounted) by the node.",
 }

 func (NodeStatus) SwaggerDoc() map[string]string {
@@ -980,14 +980,22 @@ func (plugin *mockVolumePlugin) Init(host vol.VolumeHost) error {
 	return nil
 }

-func (plugin *mockVolumePlugin) Name() string {
+func (plugin *mockVolumePlugin) GetPluginName() string {
 	return mockPluginName
 }

+func (plugin *mockVolumePlugin) GetVolumeName(spec *vol.Spec) (string, error) {
+	return spec.Name(), nil
+}
+
 func (plugin *mockVolumePlugin) CanSupport(spec *vol.Spec) bool {
 	return true
 }

+func (plugin *mockVolumePlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *mockVolumePlugin) NewMounter(spec *vol.Spec, podRef *api.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
 	return nil, fmt.Errorf("Mounter is not supported by this plugin")
 }
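The new mock methods above mirror an expansion of the volume plugin interface: plugins are now identified via GetPluginName() rather than Name(), and must be able to derive a unique volume name from a spec. A compilable sketch of the shape being satisfied, reconstructed only from the methods visible in this hunk (the stub types are placeholders; the authoritative definition is volume.VolumePlugin in pkg/volume and may differ in detail):

// Sketch reconstructed from the mock's methods; not the real pkg/volume code.
package pluginsketch

// Stub types so the sketch compiles standalone; the real ones live in
// pkg/volume and pkg/api.
type (
	VolumeHost    interface{}
	Spec          struct{ VolName string }
	Pod           struct{}
	VolumeOptions struct{}
	Mounter       interface{}
)

type VolumePlugin interface {
	// Init gives the plugin access to host (kubelet/controller) services.
	Init(host VolumeHost) error
	// GetPluginName replaces the old Name() and identifies the plugin.
	GetPluginName() string
	// GetVolumeName returns a name that uniquely identifies the volume
	// backing spec, so attach/detach state caches can be keyed on it.
	GetVolumeName(spec *Spec) (string, error)
	CanSupport(spec *Spec) bool
	// RequiresRemount reports whether the volume must be remounted
	// periodically to refresh its contents.
	RequiresRemount() bool
	NewMounter(spec *Spec, pod *Pod, opts VolumeOptions) (Mounter, error)
}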
@@ -18,6 +18,7 @@ package persistentvolume

 import (
 	"fmt"
+	"net"

 	"k8s.io/kubernetes/pkg/api"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -71,3 +72,11 @@ func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
 func (ctrl *PersistentVolumeController) GetHostName() string {
 	return ""
 }
+
+func (ctrl *PersistentVolumeController) GetHostIP() (net.IP, error) {
+	return nil, fmt.Errorf("PersistentVolumeController.GetHostIP() is not implemented")
+}
+
+func (ctrl *PersistentVolumeController) GetRootContext() string {
+	return ""
+}
@@ -20,6 +20,7 @@ package volume

 import (
 	"fmt"
+	"net"
 	"time"

 	"github.com/golang/glog"
@@ -27,7 +28,6 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller/framework"
-	"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
 	"k8s.io/kubernetes/pkg/controller/volume/cache"
 	"k8s.io/kubernetes/pkg/controller/volume/reconciler"
 	"k8s.io/kubernetes/pkg/types"
@@ -35,11 +35,12 @@ import (
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/util/runtime"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

 const (
-	// loopPeriod is the ammount of time the reconciler loop waits between
+	// loopPeriod is the amount of time the reconciler loop waits between
 	// successive executions
 	reconcilerLoopPeriod time.Duration = 100 * time.Millisecond

@@ -103,7 +104,8 @@ func NewAttachDetachController(

 	adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
 	adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
-	adc.attacherDetacher = attacherdetacher.NewAttacherDetacher(&adc.volumePluginMgr)
+	adc.attacherDetacher =
+		operationexecutor.NewOperationExecutor(&adc.volumePluginMgr)
 	adc.reconciler = reconciler.NewReconciler(
 		reconcilerLoopPeriod,
 		reconcilerMaxWaitForUnmountDuration,
@@ -152,7 +154,7 @@ type attachDetachController struct {
 	actualStateOfWorld cache.ActualStateOfWorld

 	// attacherDetacher is used to start asynchronous attach and operations
-	attacherDetacher attacherdetacher.AttacherDetacher
+	attacherDetacher operationexecutor.OperationExecutor

 	// reconciler is used to run an asynchronous periodic loop to reconcile the
 	// desiredStateOfWorld with the actualStateOfWorld by triggering attach
@@ -205,7 +207,7 @@ func (adc *attachDetachController) nodeAdd(obj interface{}) {
 	}

 	nodeName := node.Name
-	if _, exists := node.Annotations[volumehelper.ControllerManagedAnnotation]; exists {
+	if _, exists := node.Annotations[volumehelper.ControllerManagedAttachAnnotation]; exists {
 		// Node specifies annotation indicating it should be managed by attach
 		// detach controller. Add it to desired state of world.
 		adc.desiredStateOfWorld.AddNode(nodeName)
@@ -284,7 +286,7 @@ func (adc *attachDetachController) processPodVolumes(
 			continue
 		}

-		uniquePodName := getUniquePodName(pod)
+		uniquePodName := volumehelper.GetUniquePodName(pod)
 		if addVolumes {
 			// Add volume to desired state of world
 			_, err := adc.desiredStateOfWorld.AddPod(
@@ -304,7 +306,7 @@ func (adc *attachDetachController) processPodVolumes(
 				attachableVolumePlugin, volumeSpec)
 			if err != nil {
 				glog.V(10).Infof(
-					"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GenerateUniqueVolumeName failed with %v",
+					"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed with %v",
 					podVolume.Name,
 					pod.Namespace,
 					pod.Name,
@@ -502,11 +504,6 @@ func (adc *attachDetachController) processVolumesInUse(
 		}
 	}

-// getUniquePodName returns a unique name to reference pod by in memory caches
-func getUniquePodName(pod *api.Pod) types.UniquePodName {
-	return types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}.UniquePodName()
-}
-
 // VolumeHost implementation
 // This is an unfortunate requirement of the current factoring of volume plugin
 // initializing code. It requires kubelet specific methods used by the mounting
@@ -552,3 +549,11 @@ func (adc *attachDetachController) GetWriter() io.Writer {
 func (adc *attachDetachController) GetHostName() string {
 	return ""
 }
+
+func (adc *attachDetachController) GetHostIP() (net.IP, error) {
+	return nil, fmt.Errorf("GetHostIP() not supported by Attach/Detach controller's VolumeHost implementation")
+}
+
+func (adc *attachDetachController) GetRootContext() string {
+	return ""
+}
@@ -1,195 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package attacherdetacher implements interfaces that enable triggering attach
-// and detach operations on volumes.
-package attacherdetacher
-
-import (
-	"fmt"
-
-	"github.com/golang/glog"
-
-	"k8s.io/kubernetes/pkg/controller/volume/cache"
-	"k8s.io/kubernetes/pkg/util/goroutinemap"
-	"k8s.io/kubernetes/pkg/volume"
-)
-
-// AttacherDetacher defines a set of operations for attaching or detaching a
-// volume from a node.
-type AttacherDetacher interface {
-	// Spawns a new goroutine to execute volume-specific logic to attach the
-	// volume to the node specified in the volumeToAttach.
-	// Once attachment completes successfully, the actualStateOfWorld is updated
-	// to indicate the volume is attached to the node.
-	// If there is an error indicating the volume is already attached to the
-	// specified node, attachment is assumed to be successful (plugins are
-	// responsible for implmenting this behavior).
-	// All other errors are logged and the goroutine terminates without updating
-	// actualStateOfWorld (caller is responsible for retrying as needed).
-	AttachVolume(volumeToAttach cache.VolumeToAttach, actualStateOfWorld cache.ActualStateOfWorld) error
-
-	// Spawns a new goroutine to execute volume-specific logic to detach the
-	// volume from the node specified in volumeToDetach.
-	// Once detachment completes successfully, the actualStateOfWorld is updated
-	// to remove the volume/node combo.
-	// If there is an error indicating the volume is already detached from the
-	// specified node, detachment is assumed to be successful (plugins are
-	// responsible for implmenting this behavior).
-	// All other errors are logged and the goroutine terminates without updating
-	// actualStateOfWorld (caller is responsible for retrying as needed).
-	DetachVolume(volumeToDetach cache.AttachedVolume, actualStateOfWorld cache.ActualStateOfWorld) error
-}
-
-// NewAttacherDetacher returns a new instance of AttacherDetacher.
-func NewAttacherDetacher(volumePluginMgr *volume.VolumePluginMgr) AttacherDetacher {
-	return &attacherDetacher{
-		volumePluginMgr:   volumePluginMgr,
-		pendingOperations: goroutinemap.NewGoRoutineMap(),
-	}
-}
-
-type attacherDetacher struct {
-	// volumePluginMgr is the volume plugin manager used to create volume
-	// plugin objects.
-	volumePluginMgr *volume.VolumePluginMgr
-	// pendingOperations keeps track of pending attach and detach operations so
-	// multiple operations are not started on the same volume
-	pendingOperations goroutinemap.GoRoutineMap
-}
-
-func (ad *attacherDetacher) AttachVolume(
-	volumeToAttach cache.VolumeToAttach,
-	actualStateOfWorld cache.ActualStateOfWorld) error {
-	attachFunc, err := ad.generateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
-	if err != nil {
-		return err
-	}
-
-	return ad.pendingOperations.Run(string(volumeToAttach.VolumeName), attachFunc)
-}
-
-func (ad *attacherDetacher) DetachVolume(
-	volumeToDetach cache.AttachedVolume,
-	actualStateOfWorld cache.ActualStateOfWorld) error {
-	detachFunc, err := ad.generateDetachVolumeFunc(volumeToDetach, actualStateOfWorld)
-	if err != nil {
-		return err
-	}
-
-	return ad.pendingOperations.Run(string(volumeToDetach.VolumeName), detachFunc)
-}
-
-func (ad *attacherDetacher) generateAttachVolumeFunc(
-	volumeToAttach cache.VolumeToAttach,
-	actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
-	// Get attacher plugin
-	attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec)
-	if err != nil || attachableVolumePlugin == nil {
-		return nil, fmt.Errorf(
-			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
-			volumeToAttach.VolumeSpec.Name(),
-			err)
-	}
-
-	volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
-	if newAttacherErr != nil {
-		return nil, fmt.Errorf(
-			"failed to get NewAttacher from volumeSpec for volume %q err=%v",
-			volumeToAttach.VolumeSpec.Name(),
-			newAttacherErr)
-	}
-
-	return func() error {
-		// Execute attach
-		attachErr := volumeAttacher.Attach(volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
-
-		if attachErr != nil {
-			// On failure, just log and exit. The controller will retry
-			glog.Errorf(
-				"Attach operation for device %q to node %q failed with: %v",
-				volumeToAttach.VolumeName, volumeToAttach.NodeName, attachErr)
-			return attachErr
-		}
-
-		glog.Infof(
-			"Successfully attached device %q to node %q. Will update actual state of world.",
-			volumeToAttach.VolumeName, volumeToAttach.NodeName)
-
-		// Update actual state of world
-		_, addVolumeNodeErr := actualStateOfWorld.AddVolumeNode(volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
-		if addVolumeNodeErr != nil {
-			// On failure, just log and exit. The controller will retry
-			glog.Errorf(
-				"Attach operation for device %q to node %q succeeded, but updating actualStateOfWorld failed with: %v",
-				volumeToAttach.VolumeName, volumeToAttach.NodeName, addVolumeNodeErr)
-			return addVolumeNodeErr
-		}
-
-		return nil
-	}, nil
-}
-
-func (ad *attacherDetacher) generateDetachVolumeFunc(
-	volumeToDetach cache.AttachedVolume,
-	actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
-	// Get attacher plugin
-	attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec)
-	if err != nil || attachableVolumePlugin == nil {
-		return nil, fmt.Errorf(
-			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
-			volumeToDetach.VolumeSpec.Name(),
-			err)
-	}
-
-	deviceName, err := attachableVolumePlugin.GetVolumeName(volumeToDetach.VolumeSpec)
-	if err != nil {
-		return nil, fmt.Errorf(
-			"failed to GetDeviceName from AttachablePlugin for volumeSpec %q err=%v",
-			volumeToDetach.VolumeSpec.Name(),
-			err)
-	}
-
-	volumeDetacher, err := attachableVolumePlugin.NewDetacher()
-	if err != nil {
-		return nil, fmt.Errorf(
-			"failed to get NewDetacher from volumeSpec for volume %q err=%v",
-			volumeToDetach.VolumeSpec.Name(),
-			err)
-	}
-
-	return func() error {
-		// Execute detach
-		detachErr := volumeDetacher.Detach(deviceName, volumeToDetach.NodeName)
-
-		if detachErr != nil {
-			// On failure, just log and exit. The controller will retry
-			glog.Errorf(
-				"Detach operation for device %q from node %q failed with: %v",
-				volumeToDetach.VolumeName, volumeToDetach.NodeName, detachErr)
-			return detachErr
-		}
-
-		glog.Infof(
-			"Successfully detached device %q from node %q. Will update actual state of world.",
-			volumeToDetach.VolumeName, volumeToDetach.NodeName)
-
-		// Update actual state of world
-		actualStateOfWorld.DeleteVolumeNode(volumeToDetach.VolumeName, volumeToDetach.NodeName)

-		return nil
-	}, nil
-}
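This deleted package's central trick carries over to the new operationexecutor package: every attach/detach is funneled through pendingOperations.Run, keyed by volume name, so a second operation on the same volume is rejected while one is in flight. A self-contained sketch of that dedup mechanism (a simplified stand-in for pkg/util/goroutinemap, not its actual implementation):

// Simplified stand-in for goroutinemap.GoRoutineMap; illustrative only.
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// goRoutineMap runs at most one operation per key and rejects a second
// operation while the first is still in flight, which is how duplicate
// attaches/detaches on the same volume are avoided.
type goRoutineMap struct {
	mu      sync.Mutex
	pending map[string]bool
}

func newGoRoutineMap() *goRoutineMap {
	return &goRoutineMap{pending: make(map[string]bool)}
}

func (grm *goRoutineMap) Run(key string, op func() error) error {
	grm.mu.Lock()
	if grm.pending[key] {
		grm.mu.Unlock()
		return errors.New("operation already pending for " + key)
	}
	grm.pending[key] = true
	grm.mu.Unlock()

	go func() {
		defer func() {
			grm.mu.Lock()
			delete(grm.pending, key)
			grm.mu.Unlock()
		}()
		if err := op(); err != nil {
			// The real code logs and relies on the reconciler loop to retry.
			fmt.Println("operation failed:", err)
		}
	}()
	return nil
}

func main() {
	grm := newGoRoutineMap()
	attach := func() error { time.Sleep(50 * time.Millisecond); return nil }
	fmt.Println(grm.Run("volume-a", attach)) // <nil>: operation started
	fmt.Println(grm.Run("volume-a", attach)) // error: already pending
	time.Sleep(100 * time.Millisecond)
}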
@@ -28,6 +28,7 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

@@ -35,10 +36,17 @@ import (
 // the attach/detach controller's actual state of the world cache.
 // This cache contains volumes->nodes i.e. a set of all volumes and the nodes
 // the attach/detach controller believes are successfully attached.
+// Note: This is distinct from the ActualStateOfWorld implemented by the kubelet
+// volume manager. They both keep track of different objects. This contains
+// attach/detach controller specific state.
 type ActualStateOfWorld interface {
+	// ActualStateOfWorld must implement the methods required to allow
+	// operationexecutor to interact with it.
+	operationexecutor.ActualStateOfWorldAttacherUpdater
+
 	// AddVolumeNode adds the given volume and node to the underlying store
 	// indicating the specified volume is attached to the specified node.
-	// A unique volumeName is generated from the volumeSpec and returned on
+	// A unique volume name is generated from the volumeSpec and returned on
 	// success.
 	// If the volume/node combo already exists, the detachRequestedTime is reset
 	// to zero.
@@ -93,15 +101,7 @@ type ActualStateOfWorld interface {

 // AttachedVolume represents a volume that is attached to a node.
 type AttachedVolume struct {
-	// VolumeName is the unique identifier for the volume that is attached.
-	VolumeName api.UniqueVolumeName
-
-	// VolumeSpec is the volume spec containing the specification for the
-	// volume that is attached.
-	VolumeSpec *volume.Spec
-
-	// NodeName is the identifier for the node that the volume is attached to.
-	NodeName string
+	operationexecutor.AttachedVolume

 	// MountedByNode indicates that this volume has been been mounted by the
 	// node and is unsafe to detach.
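Swapping the individual fields for an embedded operationexecutor.AttachedVolume gives the controller and the operation executor a single shared definition of VolumeName/VolumeSpec/NodeName, while Go's field promotion keeps existing call sites like v.VolumeName compiling unchanged. A toy illustration of that embedding mechanism (the types here are stand-ins, not the real ones):

// Toy stand-ins for operationexecutor.AttachedVolume and cache.AttachedVolume.
package main

import "fmt"

type SharedAttachedVolume struct {
	VolumeName string
	NodeName   string
}

type ControllerAttachedVolume struct {
	SharedAttachedVolume // embedded: its fields are promoted

	MountedByNode bool // controller-specific extra state
}

func main() {
	v := ControllerAttachedVolume{
		SharedAttachedVolume: SharedAttachedVolume{
			VolumeName: "volume-name",
			NodeName:   "node-name",
		},
		MountedByNode: true,
	}
	// Promotion lets callers keep reading v.VolumeName directly.
	fmt.Println(v.VolumeName, v.NodeName, v.MountedByNode)
}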
@@ -173,6 +173,17 @@ type nodeAttachedTo struct {
 	detachRequestedTime time.Time
 }

+func (asw *actualStateOfWorld) MarkVolumeAsAttached(
+	volumeSpec *volume.Spec, nodeName string) error {
+	_, err := asw.AddVolumeNode(volumeSpec, nodeName)
+	return err
+}
+
+func (asw *actualStateOfWorld) MarkVolumeAsDetached(
+	volumeName api.UniqueVolumeName, nodeName string) {
+	asw.DeleteVolumeNode(volumeName, nodeName)
+}
+
 func (asw *actualStateOfWorld) AddVolumeNode(
 	volumeSpec *volume.Spec, nodeName string) (api.UniqueVolumeName, error) {
 	asw.Lock()
@@ -330,16 +341,11 @@ func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
 	defer asw.RUnlock()

 	attachedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
-	for volumeName, volumeObj := range asw.attachedVolumes {
-		for nodeName, nodeObj := range volumeObj.nodesAttachedTo {
+	for _, volumeObj := range asw.attachedVolumes {
+		for _, nodeObj := range volumeObj.nodesAttachedTo {
 			attachedVolumes = append(
 				attachedVolumes,
-				AttachedVolume{
-					NodeName: nodeName,
-					VolumeName: volumeName,
-					VolumeSpec: volumeObj.spec,
-					MountedByNode: nodeObj.mountedByNode,
-					DetachRequestedTime: nodeObj.detachRequestedTime})
+				getAttachedVolume(&volumeObj, &nodeObj))
 		}
 	}

@@ -353,20 +359,29 @@ func (asw *actualStateOfWorld) GetAttachedVolumesForNode(

 	attachedVolumes := make(
 		[]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
-	for volumeName, volumeObj := range asw.attachedVolumes {
+	for _, volumeObj := range asw.attachedVolumes {
 		for actualNodeName, nodeObj := range volumeObj.nodesAttachedTo {
 			if actualNodeName == nodeName {
 				attachedVolumes = append(
 					attachedVolumes,
-					AttachedVolume{
-						NodeName: nodeName,
-						VolumeName: volumeName,
-						VolumeSpec: volumeObj.spec,
-						MountedByNode: nodeObj.mountedByNode,
-						DetachRequestedTime: nodeObj.detachRequestedTime})
+					getAttachedVolume(&volumeObj, &nodeObj))
 			}
 		}
 	}

 	return attachedVolumes
 }
+
+func getAttachedVolume(
+	attachedVolume *attachedVolume,
+	nodeAttachedTo *nodeAttachedTo) AttachedVolume {
+	return AttachedVolume{
+		AttachedVolume: operationexecutor.AttachedVolume{
+			VolumeName: attachedVolume.volumeName,
+			VolumeSpec: attachedVolume.spec,
+			NodeName: nodeAttachedTo.nodeName,
+			PluginIsAttachable: true,
+		},
+		MountedByNode: nodeAttachedTo.mountedByNode,
+		DetachRequestedTime: nodeAttachedTo.detachRequestedTime}
+}
@@ -21,13 +21,14 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
+	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
 )

 // Calls AddVolumeNode() once.
 // Verifies a single volume/node entry exists.
 func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -59,7 +60,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
 // Verifies two volume/node entries exist with the same volumeSpec.
 func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -108,7 +109,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
 // Verifies a single volume/node entry exists.
 func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -151,7 +152,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
 // Verifies no volume/node entries exists.
 func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -176,11 +177,11 @@ func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
 	}
 }

-// Calls DeleteVolumeNode() to delete volume/node on empty data stcut
+// Calls DeleteVolumeNode() to delete volume/node on empty data struct
 // Verifies no volume/node entries exists.
 func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	nodeName := "node-name"
@@ -206,7 +207,7 @@ func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.
 // Verifies only second volume/node entry exists.
 func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -254,7 +255,7 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
 // Verifies the populated volume/node entry exists.
 func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -285,7 +286,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
 // Verifies requested entry does not exist, but populated entry does.
 func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -316,7 +317,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
 // Verifies requested entry does not exist.
 func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	nodeName := "node-name"
@@ -339,7 +340,7 @@ func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
 // Verifies no volume/node entries are returned.
 func Test_GetAttachedVolumes_Positive_NoVolumesOrNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)

 	// Act
@@ -356,7 +357,7 @@ func Test_GetAttachedVolumes_Positive_NoVolumesOrNodes(t *testing.T) {
 // Verifies one volume/node entry is returned.
 func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -382,7 +383,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
 // Verifies both volume/node entries are returned.
 func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volume1Name := api.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
@@ -416,7 +417,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
 // Verifies both volume/node entries are returned.
 func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -454,7 +455,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
 // Verifies mountedByNode is true and DetachRequestedTime is zero.
 func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -480,7 +481,7 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
 // Verifies mountedByNode is false.
 func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -515,7 +516,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
 // Verifies mountedByNode is still true (since there was no SetVolumeMountedByNode to true call first)
 func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -547,7 +548,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
 // Verifies mountedByNode is false and detachRequestedTime is zero.
 func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotReset(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -587,7 +588,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes
 // Verifies mountedByNode is false and detachRequestedTime is NOT zero.
 func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequestedTimePerserved(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -629,7 +630,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
 // Verifies mountedByNode is true and detachRequestedTime is zero (default values).
 func Test_MarkDesireToDetach_Positive_Set(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -655,7 +656,7 @@ func Test_MarkDesireToDetach_Positive_Set(t *testing.T) {
 // Verifies mountedByNode is true and detachRequestedTime is NOT zero.
 func Test_MarkDesireToDetach_Positive_Marked(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -688,7 +689,7 @@ func Test_MarkDesireToDetach_Positive_Marked(t *testing.T) {
 // Verifies mountedByNode is true and detachRequestedTime is reset to zero.
 func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -725,7 +726,7 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
 // Verifies mountedByNode is false and detachRequestedTime is NOT zero.
 func Test_MarkDesireToDetach_Positive_UnsetWithInitialSetVolumeMountedByNodePreserved(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@@ -789,7 +790,7 @@ func verifyAttachedVolume(

 func Test_GetAttachedVolumesForNode_Positive_NoVolumesOrNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
 	node := "random"

@@ -804,9 +805,9 @@ func Test_GetAttachedVolumesForNode_Positive_NoVolumesOrNodes(t *testing.T) {

 func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
-	volumeName := api.UniqueDeviceName("volume-name")
+	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
 	generatedVolumeName, addErr := asw.AddVolumeNode(volumeSpec, nodeName)
@@ -827,16 +828,16 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {

 func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
-	volume1Name := api.UniqueDeviceName("volume1-name")
+	volume1Name := api.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
 	node1Name := "node1-name"
 	_, add1Err := asw.AddVolumeNode(volume1Spec, node1Name)
 	if add1Err != nil {
 		t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
 	}
-	volume2Name := api.UniqueDeviceName("volume2-name")
+	volume2Name := api.UniqueVolumeName("volume2-name")
 	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
 	node2Name := "node2-name"
 	generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name)
@@ -857,9 +858,9 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {

 func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	asw := NewActualStateOfWorld(volumePluginMgr)
-	volumeName := api.UniqueDeviceName("volume-name")
+	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	node1Name := "node1-name"
 	generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeSpec, node1Name)
@@ -26,8 +26,9 @@ import (
 	"sync"

 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
+	"k8s.io/kubernetes/pkg/volume/util/types"
 	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
 )

@@ -37,6 +38,9 @@ import (
|
|||||||
// managed by the attach/detach controller, volumes are all the volumes that
|
// managed by the attach/detach controller, volumes are all the volumes that
|
||||||
// should be attached to the specified node, and pods are the pods that
|
// should be attached to the specified node, and pods are the pods that
|
||||||
// reference the volume and are scheduled to that node.
|
// reference the volume and are scheduled to that node.
|
||||||
|
// Note: This is distinct from the DesiredStateOfWorld implemented by the
|
||||||
|
// kubelet volume manager. The both keep track of different objects. This
|
||||||
|
// contains attach/detach controller specific state.
|
||||||
type DesiredStateOfWorld interface {
|
type DesiredStateOfWorld interface {
|
||||||
// AddNode adds the given node to the list of nodes managed by the attach/
|
// AddNode adds the given node to the list of nodes managed by the attach/
|
||||||
// detach controller.
|
// detach controller.
|
||||||
@@ -90,17 +94,7 @@ type DesiredStateOfWorld interface {
|
|||||||
|
|
||||||
// VolumeToAttach represents a volume that should be attached to a node.
|
// VolumeToAttach represents a volume that should be attached to a node.
|
||||||
type VolumeToAttach struct {
|
type VolumeToAttach struct {
|
||||||
// VolumeName is the unique identifier for the volume that should be
|
operationexecutor.VolumeToAttach
|
||||||
// attached.
|
|
||||||
VolumeName api.UniqueVolumeName
|
|
||||||
|
|
||||||
// VolumeSpec is a volume spec containing the specification for the volume
|
|
||||||
// that should be attached.
|
|
||||||
VolumeSpec *volume.Spec
|
|
||||||
|
|
||||||
// NodeName is the identifier for the node that the volume should be
|
|
||||||
// attached to.
|
|
||||||
NodeName string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
|
// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
|
||||||
@@ -196,7 +190,7 @@ func (dsw *desiredStateOfWorld) AddPod(
|
|||||||
attachableVolumePlugin, volumeSpec)
|
attachableVolumePlugin, volumeSpec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", fmt.Errorf(
|
return "", fmt.Errorf(
|
||||||
"failed to GenerateUniqueVolumeName for volumeSpec %q err=%v",
|
"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q err=%v",
|
||||||
volumeSpec.Name(),
|
volumeSpec.Name(),
|
||||||
err)
|
err)
|
||||||
}
|
}
|
||||||
@@ -304,7 +298,12 @@ func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
|
|||||||
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
|
volumesToAttach := make([]VolumeToAttach, 0 /* len */, len(dsw.nodesManaged) /* cap */)
|
||||||
for nodeName, nodeObj := range dsw.nodesManaged {
|
for nodeName, nodeObj := range dsw.nodesManaged {
|
||||||
for volumeName, volumeObj := range nodeObj.volumesToAttach {
|
for volumeName, volumeObj := range nodeObj.volumesToAttach {
|
||||||
volumesToAttach = append(volumesToAttach, VolumeToAttach{NodeName: nodeName, VolumeName: volumeName, VolumeSpec: volumeObj.spec})
|
volumesToAttach = append(volumesToAttach,
|
||||||
|
VolumeToAttach{
|
||||||
|
VolumeToAttach: operationexecutor.VolumeToAttach{
|
||||||
|
VolumeName: volumeName,
|
||||||
|
VolumeSpec: volumeObj.spec,
|
||||||
|
NodeName: nodeName}})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
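The VolumeToAttach change above relies on Go struct embedding: the cache type now embeds operationexecutor.VolumeToAttach, so the embedded fields (VolumeName, VolumeSpec, NodeName) are promoted and existing accessors such as volumeToAttach.VolumeName keep compiling, while the shared definition moves into the operationexecutor package. A self-contained sketch of the pattern, with hypothetical stand-in names rather than this commit's types:

    package main

    import "fmt"

    // Inner stands in for operationexecutor.VolumeToAttach.
    type Inner struct {
        VolumeName string
        NodeName   string
    }

    // Outer stands in for the controller cache's VolumeToAttach wrapper.
    type Outer struct {
        Inner // embedded: Inner's fields are promoted onto Outer
    }

    func main() {
        v := Outer{Inner: Inner{VolumeName: "volume-name", NodeName: "node-name"}}
        // Promoted fields read exactly as they did before the refactor.
        fmt.Println(v.VolumeName, v.NodeName)
        // The embedded value can also be passed on as a whole, which is what
        // the reconciler does with volumeToAttach.VolumeToAttach below.
        fmt.Println(v.Inner)
    }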
@@ -21,14 +21,15 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
-	"k8s.io/kubernetes/pkg/types"
+	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util/types"
 )

 // Calls AddNode() once.
 // Verifies node exists, and zero volumes to attach.
 func Test_AddNode_Positive_NewNode(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"

@@ -53,7 +54,7 @@ func Test_AddNode_Positive_NewNode(t *testing.T) {
 // Verifies node exists, and zero volumes to attach.
 func Test_AddNode_Positive_ExistingNode(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"

@@ -86,9 +87,9 @@ func Test_AddNode_Positive_ExistingNode(t *testing.T) {
 // Verifies node/volume exists, and 1 volumes to attach.
 func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -133,10 +134,10 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
 // Verifies the same node/volume exists, and 1 volumes to attach.
 func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	pod1Name := types.UniquePodName("pod1-name")
-	pod2Name := types.UniquePodName("pod2-name")
+	pod1Name := types.UniquePodName("pod1-uid")
+	pod2Name := types.UniquePodName("pod2-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -203,9 +204,9 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
 // Verifies the same node/volume exists, and 1 volumes to attach.
 func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -269,9 +270,9 @@ func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
 // Verifies call fails because node does not exist.
 func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -310,7 +311,7 @@ func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
 // Verifies node no longer exists, and zero volumes to attach.
 func Test_DeleteNode_Positive_NodeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"
 	dsw.AddNode(nodeName)
@@ -338,7 +339,7 @@ func Test_DeleteNode_Positive_NodeExists(t *testing.T) {
 // Verifies no error is returned, and zero volumes to attach.
 func Test_DeleteNode_Positive_NodeDoesntExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	notAddedNodeName := "node-not-added-name"

@@ -366,11 +367,11 @@ func Test_DeleteNode_Positive_NodeDoesntExist(t *testing.T) {
 // Verifies call fails because node still contains child volumes.
 func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"
 	dsw.AddNode(nodeName)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	generatedVolumeName, podAddErr := dsw.AddPod(podName, volumeSpec, nodeName)
@@ -407,9 +408,9 @@ func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
 // Verifies volume no longer exists, and zero volumes to attach.
 func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -454,10 +455,10 @@ func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
 // Verifies volume still exists, and one volumes to attach.
 func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	pod1Name := types.UniquePodName("pod1-name")
-	pod2Name := types.UniquePodName("pod2-name")
+	pod1Name := types.UniquePodName("pod1-uid")
+	pod2Name := types.UniquePodName("pod2-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -515,10 +516,10 @@ func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
 // Verifies volume still exists, and one volumes to attach.
 func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	pod1Name := types.UniquePodName("pod1-name")
-	pod2Name := types.UniquePodName("pod2-name")
+	pod1Name := types.UniquePodName("pod1-uid")
+	pod2Name := types.UniquePodName("pod2-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -564,9 +565,9 @@ func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
 // Verifies volume still exists, and one volumes to attach.
 func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	node1Name := "node1-name"
@@ -619,9 +620,9 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
 // Verifies volume still exists, and one volumes to attach.
 func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volume1Name := api.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
 	nodeName := "node-name"
@@ -673,7 +674,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
 // Verifies node does not exist, and no volumes to attach.
 func Test_NodeExists_Positive_NodeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	notAddedNodeName := "node-not-added-name"

@@ -696,7 +697,7 @@ func Test_NodeExists_Positive_NodeExists(t *testing.T) {
 // Verifies node exists, and no volumes to attach.
 func Test_NodeExists_Positive_NodeDoesntExist(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"
 	dsw.AddNode(nodeName)
@@ -720,11 +721,11 @@ func Test_NodeExists_Positive_NodeDoesntExist(t *testing.T) {
 // Verifies volume/node exists, and one volume to attach.
 func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"
 	dsw.AddNode(nodeName)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	generatedVolumeName, _ := dsw.AddPod(podName, volumeSpec, nodeName)
@@ -750,11 +751,11 @@ func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
 // Verifies volume2/node does not exist, and one volume to attach.
 func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"
 	dsw.AddNode(nodeName)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volume1Name := api.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
 	generatedVolume1Name, podAddErr := dsw.AddPod(podName, volume1Spec, nodeName)
@@ -786,7 +787,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
 // Verifies volume/node do not exist, and zero volumes to attach.
 func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	nodeName := "node-name"
 	volumeName := api.UniqueVolumeName("volume-name")
@@ -809,7 +810,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T)
 // Verifies zero volumes to attach.
 func Test_GetVolumesToAttach_Positive_NoNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)

 	// Act
@@ -826,7 +827,7 @@ func Test_GetVolumesToAttach_Positive_NoNodes(t *testing.T) {
 // Verifies zero volumes to attach.
 func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	node1Name := "node1-name"
 	node2Name := "node2-name"
@@ -847,10 +848,10 @@ func Test_GetVolumesToAttach_Positive_TwoNodes(t *testing.T) {
 // Verifies two volumes to attach.
 func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	node1Name := "node1-name"
-	pod1Name := types.UniquePodName("pod1-name")
+	pod1Name := types.UniquePodName("pod1-uid")
 	volume1Name := api.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
 	dsw.AddNode(node1Name)
@@ -862,7 +863,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
 			podAddErr)
 	}
 	node2Name := "node2-name"
-	pod2Name := types.UniquePodName("pod2-name")
+	pod2Name := types.UniquePodName("pod2-uid")
 	volume2Name := api.UniqueVolumeName("volume2-name")
 	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
 	dsw.AddNode(node2Name)
@@ -892,10 +893,10 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
 // Verifies two volumes to attach.
 func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	node1Name := "node1-name"
-	pod1Name := types.UniquePodName("pod1-name")
+	pod1Name := types.UniquePodName("pod1-uid")
 	volume1Name := api.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
 	dsw.AddNode(node1Name)
@@ -907,7 +908,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
 			podAddErr)
 	}
 	node2Name := "node2-name"
-	pod2Name := types.UniquePodName("pod2-name")
+	pod2Name := types.UniquePodName("pod2-uid")
 	volume2Name := api.UniqueVolumeName("volume2-name")
 	volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
 	dsw.AddNode(node2Name)
@@ -918,7 +919,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
 			pod2Name,
 			podAddErr)
 	}
-	pod3Name := types.UniquePodName("pod3-name")
+	pod3Name := types.UniquePodName("pod3-uid")
 	dsw.AddPod(pod3Name, volume2Spec, node2Name)
 	_, podAddErr = dsw.AddPod(pod3Name, volume2Spec, node2Name)
 	if podAddErr != nil {
@@ -946,10 +947,10 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
 // Verifies three volumes to attach.
 func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
 	// Arrange
-	volumePluginMgr, _ := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := NewDesiredStateOfWorld(volumePluginMgr)
 	node1Name := "node1-name"
-	pod1Name := types.UniquePodName("pod1-name")
+	pod1Name := types.UniquePodName("pod1-uid")
 	volume1Name := api.UniqueVolumeName("volume1-name")
 	volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
 	dsw.AddNode(node1Name)
@@ -986,7 +987,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
 			generatedVolume2Name1,
 			generatedVolume2Name2)
 	}
-	pod3Name := types.UniquePodName("pod3-name")
+	pod3Name := types.UniquePodName("pod3-uid")
 	volume3Name := api.UniqueVolumeName("volume3-name")
 	volume3Spec := controllervolumetesting.GetTestVolumeSpec(string(volume3Name), volume3Name)
 	generatedVolume3Name, podAddErr := dsw.AddPod(pod3Name, volume3Spec, node1Name)
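Taken together, the test edits above apply one consistent Arrange pattern: build the fake plugin manager from the shared volumetesting helper, construct the desired state of the world, then add nodes and pods keyed by pod UID. A condensed recap assembled only from calls already shown in this file; this is a sketch, not a new test added by the commit:

    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    dsw.AddNode("node-name")
    podName := types.UniquePodName("pod-uid")
    volumeName := api.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)

    // Act
    generatedVolumeName, err := dsw.AddPod(podName, volumeSpec, "node-name")

    // Assert
    if err != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", err)
    }
    if len(dsw.GetVolumesToAttach()) != 1 {
        t.Fatalf("Expected one volume to attach for %q", generatedVolumeName)
    }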
@@ -23,13 +23,16 @@ import (
 	"time"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
 	"k8s.io/kubernetes/pkg/controller/volume/cache"
 	"k8s.io/kubernetes/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
 )

 // Reconciler runs a periodic loop to reconcile the desired state of the world
 // with the actual state of the world by triggering attach detach operations.
+// Note: This is distinct from the Reconciler implemented by the kubelet volume
+// manager. This reconciles state for the attach/detach controller. That
+// reconciles state for the kubelet volume manager.
 type Reconciler interface {
 	// Starts running the reconciliation loop which executes periodically, checks
 	// if volumes that should be attached are attached and volumes that should
@@ -52,7 +55,7 @@ func NewReconciler(
 	maxWaitForUnmountDuration time.Duration,
 	desiredStateOfWorld cache.DesiredStateOfWorld,
 	actualStateOfWorld cache.ActualStateOfWorld,
-	attacherDetacher attacherdetacher.AttacherDetacher) Reconciler {
+	attacherDetacher operationexecutor.OperationExecutor) Reconciler {
 	return &reconciler{
 		loopPeriod:                loopPeriod,
 		maxWaitForUnmountDuration: maxWaitForUnmountDuration,
@@ -67,7 +70,7 @@ type reconciler struct {
 	maxWaitForUnmountDuration time.Duration
 	desiredStateOfWorld       cache.DesiredStateOfWorld
 	actualStateOfWorld        cache.ActualStateOfWorld
-	attacherDetacher          attacherdetacher.AttacherDetacher
+	attacherDetacher          operationexecutor.OperationExecutor
 }

 func (rc *reconciler) Run(stopCh <-chan struct{}) {
@@ -86,7 +89,7 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
 				// Volume exists in actual state of world but not desired
 				if !attachedVolume.MountedByNode {
 					glog.V(5).Infof("Attempting to start DetachVolume for volume %q to node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
-					err := rc.attacherDetacher.DetachVolume(attachedVolume, rc.actualStateOfWorld)
+					err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, rc.actualStateOfWorld)
 					if err == nil {
 						glog.Infof("Started DetachVolume for volume %q to node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
 					}
@@ -98,7 +101,7 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
 				}
 				if timeElapsed > rc.maxWaitForUnmountDuration {
 					glog.V(5).Infof("Attempting to start DetachVolume for volume %q to node %q. Volume is not safe to detach, but maxWaitForUnmountDuration expired.", attachedVolume.VolumeName, attachedVolume.NodeName)
-					err := rc.attacherDetacher.DetachVolume(attachedVolume, rc.actualStateOfWorld)
+					err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, rc.actualStateOfWorld)
 					if err == nil {
 						glog.Infof("Started DetachVolume for volume %q to node %q due to maxWaitForUnmountDuration expiry.", attachedVolume.VolumeName, attachedVolume.NodeName)
 					}
@@ -121,7 +124,7 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
 			} else {
 				// Volume/Node doesn't exist, spawn a goroutine to attach it
 				glog.V(5).Infof("Attempting to start AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
-				err := rc.attacherDetacher.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
+				err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
 				if err == nil {
 					glog.Infof("Started AttachVolume for volume %q to node %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
 				}
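The reconciler's loop body is unchanged; only the executor of attach and detach operations moves from the controller-local attacherdetacher package to the shared operationexecutor. Run itself is not part of these hunks, but given the loopPeriod field and the wait import, it presumably just re-runs the loop function on a timer; a minimal sketch under that assumption:

    func (rc *reconciler) Run(stopCh <-chan struct{}) {
        // Re-invoke the reconciliation pass every loopPeriod until stopCh closes.
        wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh)
    }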
@@ -21,12 +21,12 @@ import (
 	"time"

 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/controller/volume/attacherdetacher"
 	"k8s.io/kubernetes/pkg/controller/volume/cache"
 	controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
-	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/wait"
 	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
+	"k8s.io/kubernetes/pkg/volume/util/types"
 )

 const (
@@ -38,10 +38,10 @@ const (
 // Verifies there are no calls to attach or detach.
 func Test_Run_Positive_DoNothing(t *testing.T) {
 	// Arrange
-	volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
 	asw := cache.NewActualStateOfWorld(volumePluginMgr)
-	ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr)
+	ad := operationexecutor.NewOperationExecutor(volumePluginMgr)
 	reconciler := NewReconciler(
 		reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad)

@@ -61,13 +61,13 @@ func Test_Run_Positive_DoNothing(t *testing.T) {
 // Verifies there is one attach call and no detach calls.
 func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
 	// Arrange
-	volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
 	asw := cache.NewActualStateOfWorld(volumePluginMgr)
-	ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr)
+	ad := operationexecutor.NewOperationExecutor(volumePluginMgr)
 	reconciler := NewReconciler(
 		reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -102,13 +102,13 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
 // Verifies there is one detach call and no (new) attach calls.
 func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *testing.T) {
 	// Arrange
-	volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
 	asw := cache.NewActualStateOfWorld(volumePluginMgr)
-	ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr)
+	ad := operationexecutor.NewOperationExecutor(volumePluginMgr)
 	reconciler := NewReconciler(
 		reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -164,13 +164,13 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
 // Verifies there is one detach call and no (new) attach calls.
 func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *testing.T) {
 	// Arrange
-	volumePluginMgr, fakePlugin := controllervolumetesting.GetTestVolumePluginMgr((t))
+	volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
 	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
 	asw := cache.NewActualStateOfWorld(volumePluginMgr)
-	ad := attacherdetacher.NewAttacherDetacher(volumePluginMgr)
+	ad := operationexecutor.NewOperationExecutor(volumePluginMgr)
 	reconciler := NewReconciler(
 		reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad)
-	podName := types.UniquePodName("pod-name")
+	podName := types.UniquePodName("pod-uid")
 	volumeName := api.UniqueVolumeName("volume-name")
 	volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
 	nodeName := "node-name"
@@ -1,102 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package testing
-
-import (
-	"fmt"
-	"testing"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	"k8s.io/kubernetes/pkg/cloudprovider"
-	"k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
-	"k8s.io/kubernetes/pkg/types"
-	"k8s.io/kubernetes/pkg/util/io"
-	"k8s.io/kubernetes/pkg/util/mount"
-	"k8s.io/kubernetes/pkg/volume"
-	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
-)
-
-// GetTestVolumePluginMgr creates, initializes, and returns a test volume
-// plugin manager.
-func GetTestVolumePluginMgr(t *testing.T) (*volume.VolumePluginMgr, *volumetesting.FakeVolumePlugin) {
-	plugins := []volume.VolumePlugin{}
-
-	// plugins = append(plugins, aws_ebs.ProbeVolumePlugins()...)
-	// plugins = append(plugins, gce_pd.ProbeVolumePlugins()...)
-	// plugins = append(plugins, cinder.ProbeVolumePlugins()...)
-	volumeTestingPlugins := volumetesting.ProbeVolumePlugins(volume.VolumeConfig{})
-	plugins = append(plugins, volumeTestingPlugins...)
-
-	volumePluginMgr := testVolumePluginMgr{}
-
-	if err := volumePluginMgr.InitPlugins(plugins, &volumePluginMgr); err != nil {
-		t.Fatalf("Could not initialize volume plugins for Attach/Detach Controller: %+v", err)
-	}
-
-	return &volumePluginMgr.VolumePluginMgr, volumeTestingPlugins[0].(*volumetesting.FakeVolumePlugin)
-}
-
-type testVolumePluginMgr struct {
-	volume.VolumePluginMgr
-}
-
-// VolumeHost implementation
-// This is an unfortunate requirement of the current factoring of volume plugin
-// initializing code. It requires kubelet specific methods used by the mounting
-// code to be implemented by all initializers even if the initializer does not
-// do mounting (like this attach/detach controller).
-// Issue kubernetes/kubernetes/issues/14217 to fix this.
-func (vpm *testVolumePluginMgr) GetPluginDir(podUID string) string {
-	return ""
-}
-
-func (vpm *testVolumePluginMgr) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
-	return ""
-}
-
-func (vpm *testVolumePluginMgr) GetPodPluginDir(podUID types.UID, pluginName string) string {
-	return ""
-}
-
-func (vpm *testVolumePluginMgr) GetKubeClient() internalclientset.Interface {
-	return nil
-}
-
-func (vpm *testVolumePluginMgr) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
-	return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation")
-}
-
-func (vpm *testVolumePluginMgr) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
-	return nil, fmt.Errorf("NewWrapperUnmounter not supported by Attach/Detach controller's VolumeHost implementation")
-}
-
-func (vpm *testVolumePluginMgr) GetCloudProvider() cloudprovider.Interface {
-	return &fake.FakeCloud{}
-}
-
-func (vpm *testVolumePluginMgr) GetMounter() mount.Interface {
-	return nil
-}
-
-func (vpm *testVolumePluginMgr) GetWriter() io.Writer {
-	return nil
-}
-
-func (vpm *testVolumePluginMgr) GetHostName() string {
-	return ""
-}
@@ -71,6 +71,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
+	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volume"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
@@ -151,9 +152,6 @@ const (
 	// Period for performing image garbage collection.
 	ImageGCPeriod = 5 * time.Minute

-	// Maximum period to wait for pod volume setup operations
-	maxWaitForVolumeOps = 20 * time.Minute
-
 	// maxImagesInStatus is the number of max images we store in image status.
 	maxImagesInNodeStatus = 50
 )
@@ -299,8 +297,6 @@ func NewMainKubelet(
 	}
 	containerRefManager := kubecontainer.NewRefManager()

-	volumeManager := newVolumeManager()
-
 	oomWatcher := NewOOMWatcher(cadvisorInterface, recorder)

 	// TODO: remove when internal cbr0 implementation gets removed in favor
@@ -333,7 +329,6 @@ func NewMainKubelet(
 		recorder:         recorder,
 		cadvisor:         cadvisorInterface,
 		diskSpaceManager: diskSpaceManager,
-		volumeManager:    volumeManager,
 		cloud:            cloud,
 		nodeRef:          nodeRef,
 		nodeLabels:       nodeLabels,
@@ -496,10 +491,19 @@ func NewMainKubelet(
 		containerRefManager,
 		recorder)

-	if err := klet.volumePluginMgr.InitPlugins(volumePlugins, &volumeHost{klet}); err != nil {
+	klet.volumePluginMgr, err =
+		NewInitializedVolumePluginMgr(klet, volumePlugins)
+	if err != nil {
 		return nil, err
 	}

+	klet.volumeManager, err = kubeletvolume.NewVolumeManager(
+		enableControllerAttachDetach,
+		hostname,
+		klet.podManager,
+		klet.kubeClient,
+		klet.volumePluginMgr)
+
 	runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
 	if err != nil {
 		return nil, err
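The kubelet now constructs the new volume manager right after the plugin manager it depends on. From the call sites wired up here and used later in this file, the manager's surface can be read off; a sketch of the implied interface, with signatures inferred from usage rather than quoted from the new package (the VolumeMap return type in particular is an assumption based on how the old GetVolumes result was consumed):

    package volume

    import (
        "k8s.io/kubernetes/pkg/api"
        kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    )

    // VolumeManager, as implied by the kubelet call sites in this diff.
    type VolumeManager interface {
        // Run starts the asynchronous reconciliation loops; the kubelet
        // launches it as `go kl.volumeManager.Run(wait.NeverStop)`.
        Run(stopCh <-chan struct{})

        // WaitForAttachAndMount blocks until the pod's volumes are attached
        // and mounted, or returns an error; syncPod calls it before starting
        // any containers.
        WaitForAttachAndMount(pod *api.Pod) error

        // GetVolumesForPodAndAppendSupplementalGroups returns the mounted
        // volumes for the pod (return type assumed).
        GetVolumesForPodAndAppendSupplementalGroups(pod *api.Pod) kubecontainer.VolumeMap
    }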
@@ -643,7 +647,7 @@ type Kubelet struct {
 	runtimeState *runtimeState

 	// Volume plugins.
-	volumePluginMgr volume.VolumePluginMgr
+	volumePluginMgr *volume.VolumePluginMgr

 	// Network plugin.
 	networkPlugin network.NetworkPlugin
@@ -675,10 +679,12 @@ type Kubelet struct {
 	// Syncs pods statuses with apiserver; also used as a cache of statuses.
 	statusManager status.Manager

-	// Manager for the volume maps for the pods.
-	volumeManager *volumeManager
+	// VolumeManager runs a set of asynchronous loops that figure out which
+	// volumes need to be attached/mounted/unmounted/detached based on the pods
+	// scheduled on this node and makes it so.
+	volumeManager kubeletvolume.VolumeManager

-	//Cloud provider interface
+	// Cloud provider interface.
 	cloud cloudprovider.Interface

 	// Reference to this node.
@@ -983,6 +989,9 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 		kl.runtimeState.setInitError(err)
 	}

+	// Start volume manager
+	go kl.volumeManager.Run(wait.NeverStop)
+
 	if kl.kubeClient != nil {
 		// Start syncing node status immediately, this may set up things the runtime needs to run.
 		go wait.Until(kl.syncNodeStatus, kl.nodeStatusUpdateFrequency, wait.NeverStop)
@@ -1043,7 +1052,7 @@ func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
 			node.Annotations = make(map[string]string)
 		}

-		node.Annotations[volumehelper.ControllerManagedAnnotation] = "true"
+		node.Annotations[volumehelper.ControllerManagedAttachAnnotation] = "true"
 	}

 	// @question: should this be placed after the call to the cloud provider? which also applies labels
@@ -1405,23 +1414,20 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Cont
 		return nil, err
 	}
 	opts.Hostname = hostname
-	vol, ok := kl.volumeManager.GetVolumes(pod.UID)
-	if !ok {
-		return nil, fmt.Errorf("impossible: cannot find the mounted volumes for pod %q", format.Pod(pod))
-	}
+	volumes := kl.volumeManager.GetVolumesForPodAndAppendSupplementalGroups(pod)

 	opts.PortMappings = makePortMappings(container)
 	// Docker does not relabel volumes if the container is running
 	// in the host pid or ipc namespaces so the kubelet must
 	// relabel the volumes
 	if pod.Spec.SecurityContext != nil && (pod.Spec.SecurityContext.HostIPC || pod.Spec.SecurityContext.HostPID) {
-		err = kl.relabelVolumes(pod, vol)
+		err = kl.relabelVolumes(pod, volumes)
 		if err != nil {
 			return nil, err
 		}
 	}

-	opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, vol)
+	opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
 	if err != nil {
 		return nil, err
 	}
@@ -1786,7 +1792,7 @@ func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
 // * Create a mirror pod if the pod is a static pod, and does not
 //   already have a mirror pod
 // * Create the data directories for the pod if they do not exist
-// * Mount volumes and update the volume manager
+// * Wait for volumes to attach/mount
 // * Fetch the pull secrets for the pod
 // * Call the container runtime's SyncPod callback
 // * Update the traffic shaping for the pod's ingress and egress limits
@@ -1893,9 +1899,8 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		return err
 	}
 
-	// Mount volumes and update the volume manager
-	podVolumes, err := kl.mountExternalVolumes(pod)
-	if err != nil {
+	// Wait for volumes to attach/mount
+	if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil {
 		ref, errGetRef := api.GetReference(pod)
 		if errGetRef == nil && ref != nil {
 			kl.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
@@ -1903,7 +1908,6 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		return err
 		}
 	}
-	kl.volumeManager.SetVolumes(pod.UID, podVolumes)
 
 	// Fetch the pull secrets for the pod
 	pullSecrets, err := kl.getPullSecretsForPod(pod)
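syncPod no longer mounts volumes itself; it blocks on the volume manager until the reconciler has attached and mounted everything the pod needs. A minimal sketch of the assumed polling shape behind WaitForAttachAndMount; function names, timeout, and interval are illustrative, not the real implementation:

package main

import (
	"fmt"
	"time"
)

// unmountedVolumes would normally come from the desired/actual state caches.
func unmountedVolumes() []string {
	return nil // pretend everything is already mounted
}

// waitForAttachAndMount polls until the pod's volumes are all mounted or the
// timeout expires, mirroring the blocking behaviour syncPod now relies on.
func waitForAttachAndMount(timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		missing := unmountedVolumes()
		if len(missing) == 0 {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for volumes to attach/mount: %v", missing)
		}
		time.Sleep(interval)
	}
}

func main() {
	if err := waitForAttachAndMount(2*time.Second, 100*time.Millisecond); err != nil {
		fmt.Println("unable to mount volumes:", err)
		return
	}
	fmt.Println("all volumes mounted")
}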
@@ -1967,56 +1971,16 @@ func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) {
 	return pullSecrets, nil
 }
 
-// resolveVolumeName returns the name of the persistent volume (PV) claimed by
-// a persistent volume claim (PVC) or an error if the claim is not bound.
-// Returns nil if the volume does not use a PVC.
-func (kl *Kubelet) resolveVolumeName(pod *api.Pod, volume *api.Volume) (string, error) {
-	claimSource := volume.VolumeSource.PersistentVolumeClaim
-	if claimSource != nil {
-		// resolve real volume behind the claim
-		claim, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(claimSource.ClaimName)
-		if err != nil {
-			return "", fmt.Errorf("Cannot find claim %s/%s for volume %s", pod.Namespace, claimSource.ClaimName, volume.Name)
-		}
-		if claim.Status.Phase != api.ClaimBound {
-			return "", fmt.Errorf("Claim for volume %s/%s is not bound yet", pod.Namespace, claimSource.ClaimName)
-		}
-		// Use the real bound volume instead of PersistentVolume.Name
-		return claim.Spec.VolumeName, nil
-	}
-	return volume.Name, nil
-}
-
-// Stores all volumes defined by the set of pods into a map.
-// It stores real volumes there, i.e. persistent volume claims are resolved
-// to volumes that are bound to them.
-// Keys for each entry are in the format (POD_ID)/(VOLUME_NAME)
-func (kl *Kubelet) getDesiredVolumes(pods []*api.Pod) map[string]api.Volume {
-	desiredVolumes := make(map[string]api.Volume)
-	for _, pod := range pods {
-		for _, volume := range pod.Spec.Volumes {
-			volumeName, err := kl.resolveVolumeName(pod, &volume)
-			if err != nil {
-				glog.V(3).Infof("%v", err)
-				// Ignore the error and hope it's resolved next time
-				continue
-			}
-			identifier := path.Join(string(pod.UID), volumeName)
-			desiredVolumes[identifier] = volume
-		}
-	}
-	return desiredVolumes
-}
-
 // cleanupOrphanedPodDirs removes the volumes of pods that should not be
 // running and that have no containers running.
-func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
-	active := sets.NewString()
+func (kl *Kubelet) cleanupOrphanedPodDirs(
+	pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
+	allPods := sets.NewString()
 	for _, pod := range pods {
-		active.Insert(string(pod.UID))
+		allPods.Insert(string(pod.UID))
 	}
 	for _, pod := range runningPods {
-		active.Insert(string(pod.ID))
+		allPods.Insert(string(pod.ID))
 	}
 
 	found, err := kl.listPodsFromDisk()
@@ -2025,16 +1989,19 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
 	}
 	errlist := []error{}
 	for _, uid := range found {
-		if active.Has(string(uid)) {
+		if allPods.Has(string(uid)) {
 			continue
 		}
-		if volumes, err := kl.getPodVolumes(uid); err != nil || len(volumes) != 0 {
-			glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up; err: %v, volumes: %v ", uid, err, volumes)
+		if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
+			// If volumes have not been unmounted/detached, do not delete directory.
+			// Doing so may result in corruption of data.
+			glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up; err: %v", uid, err)
 			continue
 		}
 
 		glog.V(3).Infof("Orphaned pod %q found, removing", uid)
 		if err := os.RemoveAll(kl.getPodDir(uid)); err != nil {
+			glog.Infof("Failed to remove orphaned pod %q dir; err: %v", uid, err)
 			errlist = append(errlist, err)
 		}
 	}
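The rewritten guard is the safety property this commit cares about: an orphaned pod directory is only removed once the volume manager reports no mounted volumes for that UID, since deleting a directory with live mounts can corrupt data. A self-contained sketch of the check order; podVolumesExist here is a stand-in for the helper added in kubelet_volumes.go further down:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// podVolumesExist stands in for the kubelet helper added in this commit.
func podVolumesExist(uid string, mounted map[string]bool) bool { return mounted[uid] }

// cleanupOrphanedPodDirs mirrors the new guard: never remove a pod directory
// while its volumes are still mounted.
func cleanupOrphanedPodDirs(root string, onDisk []string, known, mounted map[string]bool) {
	for _, uid := range onDisk {
		if known[uid] {
			continue // pod is still desired or running
		}
		if podVolumesExist(uid, mounted) {
			fmt.Printf("orphaned pod %q found, but volumes are not cleaned up; skipping\n", uid)
			continue
		}
		fmt.Printf("orphaned pod %q found, removing\n", uid)
		_ = os.RemoveAll(filepath.Join(root, uid))
	}
}

func main() {
	cleanupOrphanedPodDirs("/tmp/kubelet-pods",
		[]string{"a", "b", "c"},
		map[string]bool{"a": true},
		map[string]bool{"b": true})
}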
@@ -2086,88 +2053,6 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
 	return nil
 }
 
-// Compares the map of current volumes to the map of desired volumes.
-// If an active volume does not have a respective desired volume, clean it up.
-// This method is blocking:
-// 1) it talks to API server to find volumes bound to persistent volume claims
-// 2) it talks to cloud to detach volumes
-func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
-	desiredVolumes := kl.getDesiredVolumes(pods)
-	currentVolumes := kl.getPodVolumesFromDisk()
-
-	runningSet := sets.String{}
-	for _, pod := range runningPods {
-		runningSet.Insert(string(pod.ID))
-	}
-
-	for name, cleaner := range currentVolumes {
-		if _, ok := desiredVolumes[name]; !ok {
-			parts := strings.Split(name, "/")
-			if runningSet.Has(parts[0]) {
-				glog.Infof("volume %q, still has a container running (%q), skipping teardown", name, parts[0])
-				continue
-			}
-			//TODO (jonesdl) We should somehow differentiate between volumes that are supposed
-			//to be deleted and volumes that are leftover after a crash.
-			glog.V(3).Infof("Orphaned volume %q found, tearing down volume", name)
-			// TODO(yifan): Refactor this hacky string manipulation.
-			kl.volumeManager.DeleteVolumes(types.UID(parts[0]))
-			// Get path reference count
-			volumePath := cleaner.Unmounter.GetPath()
-			refs, err := mount.GetMountRefs(kl.mounter, volumePath)
-			if err != nil {
-				glog.Errorf("Could not get mount path references for %q: %v", volumePath, err)
-			}
-			//TODO (jonesdl) This should not block other kubelet synchronization procedures
-			err = cleaner.Unmounter.TearDown()
-			if err != nil {
-				glog.Errorf("Could not tear down volume %q at %q: %v", name, volumePath, err)
-			}
-
-			// volume is unmounted. some volumes also require detachment from the node.
-			if cleaner.Detacher != nil && len(refs) == 1 {
-				// There is a bug in this code, where len(refs) is zero in some
-				// cases, and so RemoveVolumeInUse sometimes never gets called.
-				// The Attach/Detach Refactor should fix this, in the mean time,
-				// the controller timeout for safe mount is set to 3 minutes, so
-				// it will still detach the volume.
-				detacher := *cleaner.Detacher
-				devicePath, _, err := mount.GetDeviceNameFromMount(kl.mounter, refs[0])
-				if err != nil {
-					glog.Errorf("Could not find device path %v", err)
-				}
-
-				if err = detacher.UnmountDevice(refs[0], kl.mounter); err != nil {
-					glog.Errorf("Could not unmount the global mount for %q: %v", name, err)
-				}
-
-				pdName := path.Base(refs[0])
-				if kl.enableControllerAttachDetach {
-					// Attach/Detach controller is enabled and this volume type
-					// implments a detacher
-					uniqueDeviceName := volumehelper.GetUniqueVolumeName(
-						cleaner.PluginName, pdName)
-					kl.volumeManager.RemoveVolumeInUse(
-						api.UniqueVolumeName(uniqueDeviceName))
-				} else {
-					// Attach/Detach controller is disabled
-					err = detacher.Detach(pdName, kl.hostname)
-					if err != nil {
-						glog.Errorf("Could not detach volume %q at %q: %v", name, volumePath, err)
-					}
-				}
-
-				go func() {
-					if err = detacher.WaitForDetach(devicePath, maxWaitForVolumeOps); err != nil {
-						glog.Errorf("Error while waiting for detach: %v", err)
-					}
-				}()
-			}
-		}
-	}
-	return nil
-}
-
 // pastActiveDeadline returns true if the pod has been active for more than
 // ActiveDeadlineSeconds.
 func (kl *Kubelet) pastActiveDeadline(pod *api.Pod) bool {
@@ -2360,16 +2245,6 @@ func (kl *Kubelet) HandlePodCleanups() error {
 	// Note that we pass all pods (including terminated pods) to the function,
 	// so that we don't remove volumes associated with terminated but not yet
 	// deleted pods.
-	err = kl.cleanupOrphanedVolumes(allPods, runningPods)
-	if err != nil {
-		glog.Errorf("Failed cleaning up orphaned volumes: %v", err)
-		return err
-	}
-
-	// Remove any orphaned pod directories.
-	// Note that we pass all pods (including terminated pods) to the function,
-	// so that we don't remove directories associated with terminated but not yet
-	// deleted pods.
 	err = kl.cleanupOrphanedPodDirs(allPods, runningPods)
 	if err != nil {
 		glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
@@ -33,7 +33,7 @@ func TestGetContainerInfo(t *testing.T) {
 		},
 	}
 
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	fakeRuntime := testKubelet.fakeRuntime
 	kubelet := testKubelet.kubelet
 	cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
@@ -69,7 +69,7 @@ func TestGetRawContainerInfoRoot(t *testing.T) {
 			Name: containerPath,
 		},
 	}
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	mockCadvisor := testKubelet.fakeCadvisor
 	cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
@@ -96,7 +96,7 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) {
 			},
 		},
 	}
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	mockCadvisor := testKubelet.fakeCadvisor
 	cadvisorReq := &cadvisorapi.ContainerInfoRequest{}
@@ -114,7 +114,7 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) {
 
 func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
 	containerID := "ab2cdf"
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	mockCadvisor := testKubelet.fakeCadvisor
 	fakeRuntime := testKubelet.fakeRuntime
@@ -149,7 +149,7 @@ func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
 }
 
 func TestGetContainerInfoOnNonExistContainer(t *testing.T) {
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	mockCadvisor := testKubelet.fakeCadvisor
 	fakeRuntime := testKubelet.fakeRuntime
@@ -163,7 +163,7 @@ func TestGetContainerInfoOnNonExistContainer(t *testing.T) {
 }
 
 func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) {
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	mockCadvisor := testKubelet.fakeCadvisor
 	fakeRuntime := testKubelet.fakeRuntime
@@ -184,7 +184,7 @@ func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) {
 }
 
 func TestGetContainerInfoWithNoContainers(t *testing.T) {
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	mockCadvisor := testKubelet.fakeCadvisor
 
@@ -202,7 +202,7 @@ func TestGetContainerInfoWithNoContainers(t *testing.T) {
 }
 
 func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	fakeRuntime := testKubelet.fakeRuntime
 	kubelet := testKubelet.kubelet
 	mockCadvisor := testKubelet.fakeCadvisor
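Every test constructor call gains an explicit controllerAttachDetachEnabled flag, so each test now declares which attach/detach mode it exercises. A toy version of the assumed helper shape; the real newTestKubelet lives in a diff not shown here:

package main

import "fmt"

// testKubelet stands in for the kubelet test harness; the updated helper takes
// an explicit flag so each test states which attach/detach mode it runs under.
type testKubelet struct {
	controllerAttachDetachEnabled bool
}

func newTestKubelet(controllerAttachDetachEnabled bool) *testKubelet {
	return &testKubelet{controllerAttachDetachEnabled: controllerAttachDetachEnabled}
}

func main() {
	tk := newTestKubelet(false /* controllerAttachDetachEnabled */)
	fmt.Println("controller-managed attach/detach:", tk.controllerAttachDetachEnabled)
}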
@@ -24,7 +24,7 @@ import (
 )
 
 func TestKubeletDirs(t *testing.T) {
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	root := kubelet.rootDirectory
 
@@ -86,7 +86,7 @@ func TestKubeletDirs(t *testing.T) {
 }
 
 func TestKubeletDirsCompat(t *testing.T) {
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
 	kubelet := testKubelet.kubelet
 	root := kubelet.rootDirectory
 	if err := os.MkdirAll(root, 0750); err != nil {
(File diff suppressed because it is too large.)

pkg/kubelet/kubelet_volumes.go (new file, 68 lines)
@@ -0,0 +1,68 @@
+/*
+Copyright 2014 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+	"fmt"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/volume"
+	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
+)
+
+// ListVolumesForPod returns a map of the mounted volumes for the given pod.
+// The key in the map is the OuterVolumeSpecName (i.e. pod.Spec.Volumes[x].Name)
+func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
+	volumesToReturn := make(map[string]volume.Volume)
+	podVolumes := kl.volumeManager.GetMountedVolumesForPod(
+		volumetypes.UniquePodName(podUID))
+	for outerVolumeSpecName, volume := range podVolumes {
+		volumesToReturn[outerVolumeSpecName] = volume.Mounter
+	}
+
+	return volumesToReturn, len(volumesToReturn) > 0
+}
+
+// podVolumesExist checks with the volume manager and returns true if any of
+// the specified pod's volumes are still mounted.
+func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
+	if mountedVolumes :=
+		kl.volumeManager.GetMountedVolumesForPod(
+			volumetypes.UniquePodName(podUID)); len(mountedVolumes) > 0 {
+		return true
+	}
+
+	return false
+}
+
+// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
+// and volume options and then creates a Mounter.
+// Returns a valid Mounter or an error.
+func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
+	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
+	if err != nil {
+		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
+	}
+	physicalMounter, err := plugin.NewMounter(spec, pod, opts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
+	}
+	glog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name())
+	return physicalMounter, nil
+}
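Note that ListVolumesForPod returns a (map, bool) pair rather than an error, so "no mounted volumes" is a normal outcome for callers. A self-contained sketch of the same shape; Volume and fakeVolume are stub stand-ins for the pkg/volume types:

package main

import "fmt"

// Volume stands in for volume.Volume; GetPath is the only method callers need here.
type Volume interface{ GetPath() string }

type fakeVolume struct{ path string }

func (f fakeVolume) GetPath() string { return f.path }

// listVolumesForPod mirrors the new kubelet helper: it returns the mounted
// volumes keyed by OuterVolumeSpecName plus a bool, instead of an error.
func listVolumesForPod(mounted map[string]Volume) (map[string]Volume, bool) {
	out := make(map[string]Volume, len(mounted))
	for name, v := range mounted {
		out[name] = v
	}
	return out, len(out) > 0
}

func main() {
	vols, ok := listVolumesForPod(map[string]Volume{
		"config": fakeVolume{path: "/var/lib/kubelet/pods/uid/volumes/fake/config"},
	})
	fmt.Println(ok)
	for name, v := range vols {
		fmt.Println(name, "->", v.GetPath())
	}
}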
@@ -38,9 +38,12 @@ import (
 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
 	"k8s.io/kubernetes/pkg/kubelet/status"
+	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volume"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
 	utiltesting "k8s.io/kubernetes/pkg/util/testing"
+	"k8s.io/kubernetes/pkg/volume"
+	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 )
 
 func TestRunOnce(t *testing.T) {
@@ -73,7 +76,6 @@ func TestRunOnce(t *testing.T) {
 		containerRefManager: kubecontainer.NewRefManager(),
 		podManager:          podManager,
 		os:                  &containertest.FakeOS{},
-		volumeManager:       newVolumeManager(),
 		diskSpaceManager:    diskSpaceManager,
 		containerRuntime:    fakeRuntime,
 		reasonCache:         NewReasonCache(),
@@ -84,6 +86,19 @@ func TestRunOnce(t *testing.T) {
 	}
 	kb.containerManager = cm.NewStubContainerManager()
 
+	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
+	kb.volumePluginMgr, err =
+		NewInitializedVolumePluginMgr(kb, []volume.VolumePlugin{plug})
+	if err != nil {
+		t.Fatalf("failed to initialize VolumePluginMgr: %v", err)
+	}
+	kb.volumeManager, err = kubeletvolume.NewVolumeManager(
+		true,
+		kb.hostname,
+		kb.podManager,
+		kb.kubeClient,
+		kb.volumePluginMgr)
+
 	kb.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, kb.nonMasqueradeCIDR)
 	// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency
 	volumeStatsAggPeriod := time.Second * 10
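TestRunOnce now wires a real volume manager instead of the old map-based one. Reading the call, NewVolumeManager takes (controllerAttachDetachEnabled, hostname, podManager, kubeClient, volumePluginMgr); the sketch below mirrors that argument list with stub types to make the wiring explicit (every type here is a stand-in, not the kubelet's):

package main

import "fmt"

// Stand-ins for the kubelet's collaborators.
type podManager struct{}
type kubeClient struct{}
type pluginMgr struct{}

// volumeManager mirrors the constructor arguments visible in the test diff.
type volumeManager struct {
	controllerAttachDetachEnabled bool
	hostname                      string
}

func newVolumeManager(
	controllerAttachDetachEnabled bool,
	hostname string,
	_ *podManager,
	_ *kubeClient,
	_ *pluginMgr) (*volumeManager, error) {
	return &volumeManager{
		controllerAttachDetachEnabled: controllerAttachDetachEnabled,
		hostname:                      hostname,
	}, nil
}

func main() {
	vm, err := newVolumeManager(true, "127.0.0.1", &podManager{}, &kubeClient{}, &pluginMgr{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("volume manager for %q, controller-managed attach/detach: %v\n",
		vm.hostname, vm.controllerAttachDetachEnabled)
}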
pkg/kubelet/volume/cache/actual_state_of_world.go (new file, 622 lines)
@@ -0,0 +1,622 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package cache implements data structures used by the kubelet volume manager to
+keep track of attached volumes and the pods that mounted them.
+*/
+package cache
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/golang/glog"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/volume"
+	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
+	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
+	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+)
+
+// ActualStateOfWorld defines a set of thread-safe operations for the kubelet
+// volume manager's actual state of the world cache.
+// This cache contains volumes->pods i.e. a set of all volumes attached to this
+// node and the pods that the manager believes have successfully mounted the
+// volume.
+// Note: This is distinct from the ActualStateOfWorld implemented by the
+// attach/detach controller. They both keep track of different objects. This
+// contains kubelet volume manager specific state.
+type ActualStateOfWorld interface {
+	// ActualStateOfWorld must implement the methods required to allow
+	// operationexecutor to interact with it.
+	operationexecutor.ActualStateOfWorldMounterUpdater
+
+	// ActualStateOfWorld must implement the methods required to allow
+	// operationexecutor to interact with it.
+	operationexecutor.ActualStateOfWorldAttacherUpdater
+
+	// AddVolume adds the given volume to the cache indicating the specified
+	// volume is attached to this node. A unique volume name is generated from
+	// the volumeSpec and returned on success.
+	// If a volume with the same generated name already exists, this is a noop.
+	// If no volume plugin can support the given volumeSpec or more than one
+	// plugin can support it, an error is returned.
+	AddVolume(volumeSpec *volume.Spec) (api.UniqueVolumeName, error)
+
+	// AddPodToVolume adds the given pod to the given volume in the cache
+	// indicating the specified volume has been successfully mounted to the
+	// specified pod.
+	// If a pod with the same unique name already exists under the specified
+	// volume, this is a no-op.
+	// If a volume with the name volumeName does not exist in the list of
+	// attached volumes, an error is returned.
+	AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName api.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error
+
+	// MarkRemountRequired marks each volume that is successfully attached and
+	// mounted for the specified pod as requiring remount (if the plugin for the
+	// volume indicates it requires remounting on pod updates). Atomically
+	// updating volumes depend on this to update the contents of the volume on
+	// pod update.
+	MarkRemountRequired(podName volumetypes.UniquePodName)
+
+	// SetVolumeGloballyMounted sets the GloballyMounted value for the given
+	// volume. When set to true this value indicates that the volume is mounted
+	// to the underlying device at a global mount point. This global mount point
+	// must be unmounted prior to detach.
+	// If a volume with the name volumeName does not exist in the list of
+	// attached volumes, an error is returned.
+	SetVolumeGloballyMounted(volumeName api.UniqueVolumeName, globallyMounted bool) error
+
+	// DeletePodFromVolume removes the given pod from the given volume in the
+	// cache indicating the volume has been successfully unmounted from the pod.
+	// If a pod with the same unique name does not exist under the specified
+	// volume, this is a no-op.
+	// If a volume with the name volumeName does not exist in the list of
+	// attached volumes, an error is returned.
+	DeletePodFromVolume(podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error
+
+	// DeleteVolume removes the given volume from the list of attached volumes
+	// in the cache indicating the volume has been successfully detached from
+	// this node.
+	// If a volume with the name volumeName does not exist in the list of
+	// attached volumes, this is a no-op.
+	// If a volume with the name volumeName exists and its list of mountedPods
+	// is not empty, an error is returned.
+	DeleteVolume(volumeName api.UniqueVolumeName) error
+
+	// PodExistsInVolume returns true if the given pod exists in the list of
+	// mountedPods for the given volume in the cache, indicating that the volume
+	// is attached to this node and the pod has successfully mounted it.
+	// If a pod with the same unique name does not exist under the specified
+	// volume, false is returned.
+	// If a volume with the name volumeName does not exist in the list of
+	// attached volumes, a volumeNotAttachedError is returned indicating the
+	// given volume is not yet attached.
+	// If the given volumeName/podName combo exists but the value of
+	// remountRequired is true, a remountRequiredError is returned indicating
+	// the given volume has been successfully mounted to this pod but should be
+	// remounted to reflect changes in the referencing pod. Atomically updating
+	// volumes depend on this to update the contents of the volume.
+	// All volume mounting calls should be idempotent so a second mount call for
+	// volumes that do not need to update contents should not fail.
+	PodExistsInVolume(podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) (bool, error)
+
+	// GetMountedVolumes generates and returns a list of volumes and the pods
+	// they are successfully attached and mounted for based on the current
+	// actual state of the world.
+	GetMountedVolumes() []MountedVolume
+
+	// GetMountedVolumesForPod generates and returns a list of volumes that are
+	// successfully attached and mounted for the specified pod based on the
+	// current actual state of the world.
+	GetMountedVolumesForPod(podName volumetypes.UniquePodName) []MountedVolume
+
+	// GetAttachedVolumes generates and returns a list of all attached volumes.
+	GetAttachedVolumes() []AttachedVolume
+
+	// GetUnmountedVolumes generates and returns a list of attached volumes that
+	// have no mountedPods. This list can be used to determine which volumes are
+	// no longer referenced and may be detached.
+	GetUnmountedVolumes() []AttachedVolume
+}
+
+// MountedVolume represents a volume that has successfully been mounted to a pod.
+type MountedVolume struct {
+	operationexecutor.MountedVolume
+}
+
+// AttachedVolume represents a volume that is attached to a node.
+type AttachedVolume struct {
+	operationexecutor.AttachedVolume
+
+	// GloballyMounted indicates that the volume is mounted to the underlying
+	// device at a global mount point. This global mount point must be unmounted
+	// prior to detach.
+	GloballyMounted bool
+}
+
+// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
+func NewActualStateOfWorld(
+	nodeName string,
+	volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
+	return &actualStateOfWorld{
+		nodeName:        nodeName,
+		attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume),
+		volumePluginMgr: volumePluginMgr,
+	}
+}
+
+// IsVolumeNotAttachedError returns true if the specified error is a
+// volumeNotAttachedError.
+func IsVolumeNotAttachedError(err error) bool {
+	_, ok := err.(volumeNotAttachedError)
+	return ok
+}
+
+// IsRemountRequiredError returns true if the specified error is a
+// remountRequiredError.
+func IsRemountRequiredError(err error) bool {
+	_, ok := err.(remountRequiredError)
+	return ok
+}
+
+type actualStateOfWorld struct {
+	// nodeName is the name of this node. This value is passed to Attach/Detach
+	nodeName string
+	// attachedVolumes is a map containing the set of volumes the kubelet volume
+	// manager believes to be successfully attached to this node. Volume types
+	// that do not implement an attacher interface are assumed to be in this
+	// state by default.
+	// The key in this map is the name of the volume and the value is an object
+	// containing more information about the attached volume.
+	attachedVolumes map[api.UniqueVolumeName]attachedVolume
+	// volumePluginMgr is the volume plugin manager used to create volume
+	// plugin objects.
+	volumePluginMgr *volume.VolumePluginMgr
+	sync.RWMutex
+}
+
+// attachedVolume represents a volume the kubelet volume manager believes to be
+// successfully attached to a node it is managing. Volume types that do not
+// implement an attacher are assumed to be in this state.
+type attachedVolume struct {
+	// volumeName contains the unique identifier for this volume.
+	volumeName api.UniqueVolumeName
+
+	// mountedPods is a map containing the set of pods that this volume has been
+	// successfully mounted to. The key in this map is the name of the pod and
+	// the value is a mountedPod object containing more information about the
+	// pod.
+	mountedPods map[volumetypes.UniquePodName]mountedPod
+
+	// spec is the volume spec containing the specification for this volume.
+	// Used to generate the volume plugin object, and passed to plugin methods.
+	// In particular, the Unmount method uses spec.Name() as the volumeSpecName
+	// in the mount path:
+	// /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/
+	spec *volume.Spec
+
+	// pluginName is the Unescaped Qualified name of the volume plugin used to
+	// attach and mount this volume. It is stored separately in case the full
+	// volume spec (everything except the name) can not be reconstructed for a
+	// volume that should be unmounted (which would be the case for a mount path
+	// read from disk without a full volume spec).
+	pluginName string
+
+	// pluginIsAttachable indicates the volume plugin used to attach and mount
+	// this volume implements the volume.Attacher interface
+	pluginIsAttachable bool
+
+	// globallyMounted indicates that the volume is mounted to the underlying
+	// device at a global mount point. This global mount point must be unmounted
+	// prior to detach.
+	globallyMounted bool
+}
+
+// The mountedPod object represents a pod for which the kubelet volume manager
+// believes the underlying volume has successfully been mounted.
+type mountedPod struct {
+	// the name of the pod
+	podName volumetypes.UniquePodName
+
+	// the UID of the pod
+	podUID types.UID
+
+	// mounter used to mount
+	mounter volume.Mounter
+
+	// outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced
+	// directly in the pod. If the volume was referenced through a persistent
+	// volume claim, this contains the volume.Spec.Name() of the persistent
+	// volume claim
+	outerVolumeSpecName string
+
+	// remountRequired indicates the underlying volume has been successfully
+	// mounted to this pod but it should be remounted to reflect changes in the
+	// referencing pod.
+	// Atomically updating volumes depend on this to update the contents of the
+	// volume. All volume mounting calls should be idempotent so a second mount
+	// call for volumes that do not need to update contents should not fail.
+	remountRequired bool
+
+	// volumeGidValue contains the value of the GID annotation, if present.
+	volumeGidValue string
+}
+
+func (asw *actualStateOfWorld) MarkVolumeAsAttached(
+	volumeSpec *volume.Spec, nodeName string) error {
+	_, err := asw.AddVolume(volumeSpec)
+	return err
+}
+
+func (asw *actualStateOfWorld) MarkVolumeAsDetached(
+	volumeName api.UniqueVolumeName, nodeName string) {
+	asw.DeleteVolume(volumeName)
+}
+
+func (asw *actualStateOfWorld) MarkVolumeAsMounted(
+	podName volumetypes.UniquePodName,
+	podUID types.UID,
+	volumeName api.UniqueVolumeName,
+	mounter volume.Mounter,
+	outerVolumeSpecName string,
+	volumeGidValue string) error {
+	return asw.AddPodToVolume(
+		podName,
+		podUID,
+		volumeName,
+		mounter,
+		outerVolumeSpecName,
+		volumeGidValue)
+}
+
+func (asw *actualStateOfWorld) MarkVolumeAsUnmounted(
+	podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error {
+	return asw.DeletePodFromVolume(podName, volumeName)
+}
+
+func (asw *actualStateOfWorld) MarkDeviceAsMounted(
+	volumeName api.UniqueVolumeName) error {
+	return asw.SetVolumeGloballyMounted(volumeName, true /* globallyMounted */)
+}
+
+func (asw *actualStateOfWorld) MarkDeviceAsUnmounted(
+	volumeName api.UniqueVolumeName) error {
+	return asw.SetVolumeGloballyMounted(volumeName, false /* globallyMounted */)
+}
+
+func (asw *actualStateOfWorld) AddVolume(
+	volumeSpec *volume.Spec) (api.UniqueVolumeName, error) {
+	asw.Lock()
+	defer asw.Unlock()
+
+	volumePlugin, err := asw.volumePluginMgr.FindPluginBySpec(volumeSpec)
+	if err != nil || volumePlugin == nil {
+		return "", fmt.Errorf(
+			"failed to get Plugin from volumeSpec for volume %q err=%v",
+			volumeSpec.Name(),
+			err)
+	}
+
+	volumeName, err :=
+		volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
+	if err != nil {
+		return "", fmt.Errorf(
+			"failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
+			volumeSpec.Name(),
+			volumePlugin.GetPluginName(),
+			err)
+	}
+
+	pluginIsAttachable := false
+	if _, ok := volumePlugin.(volume.AttachableVolumePlugin); ok {
+		pluginIsAttachable = true
+	}
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		volumeObj = attachedVolume{
+			volumeName:         volumeName,
+			spec:               volumeSpec,
+			mountedPods:        make(map[volumetypes.UniquePodName]mountedPod),
+			pluginName:         volumePlugin.GetPluginName(),
+			pluginIsAttachable: pluginIsAttachable,
+			globallyMounted:    false,
+		}
+		asw.attachedVolumes[volumeName] = volumeObj
+	}
+
+	return volumeObj.volumeName, nil
+}
+
+func (asw *actualStateOfWorld) AddPodToVolume(
+	podName volumetypes.UniquePodName,
+	podUID types.UID,
+	volumeName api.UniqueVolumeName,
+	mounter volume.Mounter,
+	outerVolumeSpecName string,
+	volumeGidValue string) error {
+	asw.Lock()
+	defer asw.Unlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return fmt.Errorf(
+			"no volume with the name %q exists in the list of attached volumes",
+			volumeName)
+	}
+
+	podObj, podExists := volumeObj.mountedPods[podName]
+	if !podExists {
+		podObj = mountedPod{
+			podName:             podName,
+			podUID:              podUID,
+			mounter:             mounter,
+			outerVolumeSpecName: outerVolumeSpecName,
+			volumeGidValue:      volumeGidValue,
+		}
+	}
+
+	// If pod exists, reset remountRequired value
+	podObj.remountRequired = false
+	asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
+
+	return nil
+}
+
+func (asw *actualStateOfWorld) MarkRemountRequired(
+	podName volumetypes.UniquePodName) {
+	asw.Lock()
+	defer asw.Unlock()
+	for volumeName, volumeObj := range asw.attachedVolumes {
+		for mountedPodName, podObj := range volumeObj.mountedPods {
+			if mountedPodName != podName {
+				continue
+			}
+
+			volumePlugin, err :=
+				asw.volumePluginMgr.FindPluginBySpec(volumeObj.spec)
+			if err != nil || volumePlugin == nil {
+				// Log and continue processing
+				glog.Errorf(
+					"MarkRemountRequired failed to FindPluginBySpec for pod %q (podUid %q) volume: %q (volSpecName: %q)",
+					podObj.podName,
+					podObj.podUID,
+					volumeObj.volumeName,
+					volumeObj.spec.Name())
+				continue
+			}
+
+			if volumePlugin.RequiresRemount() {
+				podObj.remountRequired = true
+				asw.attachedVolumes[volumeName].mountedPods[podName] = podObj
+			}
+		}
+	}
+}
+
+func (asw *actualStateOfWorld) SetVolumeGloballyMounted(
+	volumeName api.UniqueVolumeName, globallyMounted bool) error {
+	asw.Lock()
+	defer asw.Unlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return fmt.Errorf(
+			"no volume with the name %q exists in the list of attached volumes",
+			volumeName)
+	}
+
+	volumeObj.globallyMounted = globallyMounted
+	asw.attachedVolumes[volumeName] = volumeObj
+	return nil
+}
+
+func (asw *actualStateOfWorld) DeletePodFromVolume(
+	podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error {
+	asw.Lock()
+	defer asw.Unlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return fmt.Errorf(
+			"no volume with the name %q exists in the list of attached volumes",
+			volumeName)
+	}
+
+	_, podExists := volumeObj.mountedPods[podName]
+	if podExists {
+		delete(asw.attachedVolumes[volumeName].mountedPods, podName)
+	}
+
+	return nil
+}
+
+func (asw *actualStateOfWorld) DeleteVolume(volumeName api.UniqueVolumeName) error {
+	asw.Lock()
+	defer asw.Unlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return nil
+	}
+
+	if len(volumeObj.mountedPods) != 0 {
+		return fmt.Errorf(
+			"failed to DeleteVolume %q, it still has %v mountedPods",
+			volumeName,
+			len(volumeObj.mountedPods))
+	}
+
+	delete(asw.attachedVolumes, volumeName)
+	return nil
+}
+
+func (asw *actualStateOfWorld) PodExistsInVolume(
+	podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) (bool, error) {
+	asw.RLock()
+	defer asw.RUnlock()
+
+	volumeObj, volumeExists := asw.attachedVolumes[volumeName]
+	if !volumeExists {
+		return false, newVolumeNotAttachedError(volumeName)
+	}
+
+	podObj, podExists := volumeObj.mountedPods[podName]
+	if podExists && podObj.remountRequired {
+		return true, newRemountRequiredError(volumeObj.volumeName, podObj.podName)
+	}
+
+	return podExists, nil
+}
+
+func (asw *actualStateOfWorld) GetMountedVolumes() []MountedVolume {
+	asw.RLock()
+	defer asw.RUnlock()
+	mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
+	for _, volumeObj := range asw.attachedVolumes {
+		for _, podObj := range volumeObj.mountedPods {
+			mountedVolume = append(
+				mountedVolume,
+				getMountedVolume(&podObj, &volumeObj))
+		}
+	}
+
+	return mountedVolume
+}
+
+func (asw *actualStateOfWorld) GetMountedVolumesForPod(
+	podName volumetypes.UniquePodName) []MountedVolume {
+	asw.RLock()
+	defer asw.RUnlock()
+	mountedVolume := make([]MountedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
+	for _, volumeObj := range asw.attachedVolumes {
+		for mountedPodName, podObj := range volumeObj.mountedPods {
+			if mountedPodName == podName {
+				mountedVolume = append(
+					mountedVolume,
+					getMountedVolume(&podObj, &volumeObj))
+			}
+		}
+	}
+
+	return mountedVolume
+}
+
+func (asw *actualStateOfWorld) GetAttachedVolumes() []AttachedVolume {
+	asw.RLock()
+	defer asw.RUnlock()
+	unmountedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
+	for _, volumeObj := range asw.attachedVolumes {
+		unmountedVolumes = append(
+			unmountedVolumes,
+			asw.getAttachedVolume(&volumeObj))
+	}
+
+	return unmountedVolumes
+}
+
+func (asw *actualStateOfWorld) GetUnmountedVolumes() []AttachedVolume {
+	asw.RLock()
+	defer asw.RUnlock()
+	unmountedVolumes := make([]AttachedVolume, 0 /* len */, len(asw.attachedVolumes) /* cap */)
+	for _, volumeObj := range asw.attachedVolumes {
+		if len(volumeObj.mountedPods) == 0 {
+			unmountedVolumes = append(
+				unmountedVolumes,
+				asw.getAttachedVolume(&volumeObj))
+		}
+	}
+
+	return unmountedVolumes
+}
+
+func (asw *actualStateOfWorld) getAttachedVolume(
+	attachedVolume *attachedVolume) AttachedVolume {
+	return AttachedVolume{
+		AttachedVolume: operationexecutor.AttachedVolume{
+			VolumeName:         attachedVolume.volumeName,
+			VolumeSpec:         attachedVolume.spec,
+			NodeName:           asw.nodeName,
+			PluginIsAttachable: attachedVolume.pluginIsAttachable},
+		GloballyMounted: attachedVolume.globallyMounted}
+}
+
+// Compile-time check to ensure volumeNotAttachedError implements the error interface
+var _ error = volumeNotAttachedError{}
+
+// volumeNotAttachedError is an error returned when PodExistsInVolume() fails to
+// find specified volume in the list of attached volumes.
+type volumeNotAttachedError struct {
+	volumeName api.UniqueVolumeName
+}
+
+func (err volumeNotAttachedError) Error() string {
+	return fmt.Sprintf(
+		"volumeName %q does not exist in the list of attached volumes",
+		err.volumeName)
+}
+
+func newVolumeNotAttachedError(volumeName api.UniqueVolumeName) error {
+	return volumeNotAttachedError{
+		volumeName: volumeName,
+	}
+}
+
+// Compile-time check to ensure remountRequiredError implements the error interface
+var _ error = remountRequiredError{}
+
+// remountRequiredError is an error returned when PodExistsInVolume() found
+// volume/pod attached/mounted but remountRequired was true, indicating the
+// given volume should be remounted to the pod to reflect changes in the
+// referencing pod.
+type remountRequiredError struct {
+	volumeName api.UniqueVolumeName
+	podName    volumetypes.UniquePodName
+}
+
+func (err remountRequiredError) Error() string {
+	return fmt.Sprintf(
+		"volumeName %q is mounted to %q but should be remounted",
+		err.volumeName, err.podName)
+}
+
+func newRemountRequiredError(
+	volumeName api.UniqueVolumeName, podName volumetypes.UniquePodName) error {
+	return remountRequiredError{
+		volumeName: volumeName,
+		podName:    podName,
+	}
+}
+
+// getMountedVolume constructs and returns a MountedVolume object from the given
+// mountedPod and attachedVolume objects.
+func getMountedVolume(
+	mountedPod *mountedPod, attachedVolume *attachedVolume) MountedVolume {
+	return MountedVolume{
+		MountedVolume: operationexecutor.MountedVolume{
+			PodName:             mountedPod.podName,
+			VolumeName:          attachedVolume.volumeName,
+			InnerVolumeSpecName: attachedVolume.spec.Name(),
+			OuterVolumeSpecName: mountedPod.outerVolumeSpecName,
+			PluginName:          attachedVolume.pluginName,
+			PodUID:              mountedPod.podUID,
+			Mounter:             mountedPod.mounter,
+			VolumeGidValue:      mountedPod.volumeGidValue}}
+}
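Callers of PodExistsInVolume are expected to branch on the sentinel error types via the exported helpers rather than on error strings. A self-contained sketch of that consumption pattern, with the cache's error types reduced to local stand-ins:

package main

import "fmt"

// Stand-ins for the cache package's sentinel error types.
type volumeNotAttachedError struct{ volumeName string }

func (e volumeNotAttachedError) Error() string {
	return fmt.Sprintf("volumeName %q does not exist in the list of attached volumes", e.volumeName)
}

type remountRequiredError struct{ volumeName, podName string }

func (e remountRequiredError) Error() string {
	return fmt.Sprintf("volumeName %q is mounted to %q but should be remounted", e.volumeName, e.podName)
}

// IsVolumeNotAttachedError / IsRemountRequiredError mirror the exported helpers.
func IsVolumeNotAttachedError(err error) bool { _, ok := err.(volumeNotAttachedError); return ok }
func IsRemountRequiredError(err error) bool   { _, ok := err.(remountRequiredError); return ok }

func main() {
	err := error(remountRequiredError{volumeName: "vol1", podName: "pod1"})
	switch {
	case err == nil:
		fmt.Println("pod already mounted; nothing to do")
	case IsRemountRequiredError(err):
		fmt.Println("mounted, but needs remount:", err)
	case IsVolumeNotAttachedError(err):
		fmt.Println("not attached yet; wait for attach:", err)
	}
}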
pkg/kubelet/volume/cache/actual_state_of_world_test.go (new file, 357 lines)
@@ -0,0 +1,357 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/volume"
+	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
+	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
+	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
+)
+
+// Calls AddVolume() once to add volume
+// Verifies newly added volume exists in GetAttachedVolumes()
+func Test_AddVolume_Positive_NewVolume(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name: "pod1",
+			UID:  "pod1uid",
+		},
+		Spec: api.PodSpec{
+			Volumes: []api.Volume{
+				{
+					Name: "volume-name",
+					VolumeSource: api.VolumeSource{
+						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+							PDName: "fake-device1",
+						},
+					},
+				},
+			},
+		},
+	}
+	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+
+	// Act
+	generatedVolumeName, err := asw.AddVolume(volumeSpec)
+
+	// Assert
+	if err != nil {
+		t.Fatalf("AddVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	verifyVolumeExistsInAttachedVolumes(t, generatedVolumeName, asw)
+}
+
+// Calls AddVolume() twice to add the same volume
+// Verifies newly added volume exists in GetAttachedVolumes() and second call
+// doesn't fail
+func Test_AddVolume_Positive_ExistingVolume(t *testing.T) {
+	// Arrange
+	volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name: "pod1",
+			UID:  "pod1uid",
+		},
+		Spec: api.PodSpec{
+			Volumes: []api.Volume{
+				{
+					Name: "volume-name",
+					VolumeSource: api.VolumeSource{
+						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+							PDName: "fake-device1",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+	generatedVolumeName, err := asw.AddVolume(volumeSpec)
+	if err != nil {
+		t.Fatalf("AddVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	// Act
+	generatedVolumeName, err = asw.AddVolume(volumeSpec)
+
+	// Assert
+	if err != nil {
+		t.Fatalf("AddVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	verifyVolumeExistsInAttachedVolumes(t, generatedVolumeName, asw)
+}
+
+// Populates data struct with a volume
+// Calls AddPodToVolume() to add a pod to the volume
+// Verifies volume/pod combo exists using PodExistsInVolume()
+func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
+	// Arrange
+	volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
+
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name: "pod1",
+			UID:  "pod1uid",
+		},
+		Spec: api.PodSpec{
+			Volumes: []api.Volume{
+				{
+					Name: "volume-name",
+					VolumeSource: api.VolumeSource{
+						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+							PDName: "fake-device1",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+	volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
+		plugin, volumeSpec)
+
+	generatedVolumeName, err := asw.AddVolume(volumeSpec)
+	if err != nil {
+		t.Fatalf("AddVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+	podName := volumehelper.GetUniquePodName(pod)
+
+	mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
+	if err != nil {
+		t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	// Act
+	err = asw.AddPodToVolume(
+		podName, pod.UID, volumeName, mounter, volumeSpec.Name(), "" /* volumeGidValue */)
+
+	// Assert
+	if err != nil {
+		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	verifyVolumeExistsInAttachedVolumes(t, generatedVolumeName, asw)
+	verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, asw)
+}
+
+// Populates data struct with a volume
+// Calls AddPodToVolume() twice to add the same pod to the volume
+// Verifies volume/pod combo exists using PodExistsInVolume() and the second call
+// did not fail.
+func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
+	// Arrange
+	volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
+	asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
+
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name: "pod1",
+			UID:  "pod1uid",
+		},
+		Spec: api.PodSpec{
+			Volumes: []api.Volume{
+				{
+					Name: "volume-name",
+					VolumeSource: api.VolumeSource{
+						GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+							PDName: "fake-device1",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+	volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
+		plugin, volumeSpec)
+
+	generatedVolumeName, err := asw.AddVolume(volumeSpec)
+	if err != nil {
+		t.Fatalf("AddVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+	podName := volumehelper.GetUniquePodName(pod)
+
+	mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
+	if err != nil {
+		t.Fatalf("NewMounter failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	err = asw.AddPodToVolume(
+		podName, pod.UID, volumeName, mounter, volumeSpec.Name(), "" /* volumeGidValue */)
+	if err != nil {
+		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	// Act
+	err = asw.AddPodToVolume(
+		podName, pod.UID, volumeName, mounter, volumeSpec.Name(), "" /* volumeGidValue */)
+
+	// Assert
+	if err != nil {
+		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
+	}
+
+	verifyVolumeExistsInAttachedVolumes(t, generatedVolumeName, asw)
+	verifyPodExistsInVolumeAsw(t, podName, generatedVolumeName, asw)
+}
+
+// Calls AddPodToVolume() to add pod to empty data struct
+// Verifies call fails with "volume does not exist" error.
+func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
|
||||||
|
// Arrange
|
||||||
|
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||||
|
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
|
||||||
|
|
||||||
|
pod := &api.Pod{
|
||||||
|
ObjectMeta: api.ObjectMeta{
|
||||||
|
Name: "pod1",
|
||||||
|
UID: "pod1uid",
|
||||||
|
},
|
||||||
|
Spec: api.PodSpec{
|
||||||
|
Volumes: []api.Volume{
|
||||||
|
{
|
||||||
|
Name: "volume-name",
|
||||||
|
VolumeSource: api.VolumeSource{
|
||||||
|
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
|
||||||
|
PDName: "fake-device1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
|
||||||
|
plugin, err := volumePluginMgr.FindPluginBySpec(volumeSpec)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf(
|
||||||
|
"volumePluginMgr.FindPluginBySpec failed to find volume plugin for %#v with: %v",
|
||||||
|
volumeSpec,
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
volumeName, err := volumehelper.GetUniqueVolumeNameFromSpec(
|
||||||
|
plugin, volumeSpec)
|
||||||
|
|
||||||
|
podName := volumehelper.GetUniquePodName(pod)
|
||||||
|
|
||||||
|
mounter, err := plugin.NewMounter(volumeSpec, pod, volume.VolumeOptions{})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("NewUnmounter failed. Expected: <no error> Actual: <%v>", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Act
|
||||||
|
err = asw.AddPodToVolume(
|
||||||
|
podName, pod.UID, volumeName, mounter, volumeSpec.Name(), "" /* volumeGidValue */)
|
||||||
|
|
||||||
|
// Assert
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("AddPodToVolume did not fail. Expected: <\"no volume with the name ... exists in the list of attached volumes\"> Actual: <no error>")
|
||||||
|
}
|
||||||
|
|
||||||
|
verifyVolumeDoesntExistInAttachedVolumes(t, volumeName, asw)
|
||||||
|
verifyPodDoesntExistInVolumeAsw(
|
||||||
|
t,
|
||||||
|
podName,
|
||||||
|
volumeName,
|
||||||
|
false, /* expectVolumeToExist */
|
||||||
|
asw)
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyVolumeExistsInAttachedVolumes(
|
||||||
|
t *testing.T, expectedVolumeName api.UniqueVolumeName, asw ActualStateOfWorld) {
|
||||||
|
attachedVolumes := asw.GetAttachedVolumes()
|
||||||
|
for _, volume := range attachedVolumes {
|
||||||
|
if volume.VolumeName == expectedVolumeName {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Fatalf(
|
||||||
|
"Could not find volume %v in the list of attached volumes for actual state of world %+v",
|
||||||
|
expectedVolumeName,
|
||||||
|
attachedVolumes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyVolumeDoesntExistInAttachedVolumes(
|
||||||
|
t *testing.T, volumeToCheck api.UniqueVolumeName, asw ActualStateOfWorld) {
|
||||||
|
attachedVolumes := asw.GetAttachedVolumes()
|
||||||
|
for _, volume := range attachedVolumes {
|
||||||
|
if volume.VolumeName == volumeToCheck {
|
||||||
|
t.Fatalf(
|
||||||
|
"Found volume %v in the list of attached volumes. Expected it not to exist.",
|
||||||
|
volumeToCheck)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyPodExistsInVolumeAsw(
|
||||||
|
t *testing.T,
|
||||||
|
expectedPodName volumetypes.UniquePodName,
|
||||||
|
expectedVolumeName api.UniqueVolumeName,
|
||||||
|
asw ActualStateOfWorld) {
|
||||||
|
podExistsInVolume, err :=
|
||||||
|
asw.PodExistsInVolume(expectedPodName, expectedVolumeName)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf(
|
||||||
|
"ASW PodExistsInVolume failed. Expected: <no error> Actual: <%v>", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !podExistsInVolume {
|
||||||
|
t.Fatalf(
|
||||||
|
"ASW PodExistsInVolume result invalid. Expected: <true> Actual: <%v>",
|
||||||
|
podExistsInVolume)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func verifyPodDoesntExistInVolumeAsw(
|
||||||
|
t *testing.T,
|
||||||
|
podToCheck volumetypes.UniquePodName,
|
||||||
|
volumeToCheck api.UniqueVolumeName,
|
||||||
|
expectVolumeToExist bool,
|
||||||
|
asw ActualStateOfWorld) {
|
||||||
|
podExistsInVolume, err :=
|
||||||
|
asw.PodExistsInVolume(podToCheck, volumeToCheck)
|
||||||
|
if !expectVolumeToExist && err == nil {
|
||||||
|
t.Fatalf(
|
||||||
|
"ASW PodExistsInVolume did not return error. Expected: <error indicating volume does not exist> Actual: <%v>", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if expectVolumeToExist && err != nil {
|
||||||
|
t.Fatalf(
|
||||||
|
"ASW PodExistsInVolume failed. Expected: <no error> Actual: <%v>", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if podExistsInVolume {
|
||||||
|
t.Fatalf(
|
||||||
|
"ASW PodExistsInVolume result invalid. Expected: <false> Actual: <%v>",
|
||||||
|
podExistsInVolume)
|
||||||
|
}
|
||||||
|
}
|
||||||
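The helpers above lean on the fact that the actual state of the world's PodExistsInVolume returns a (bool, error) pair whose error explains why a pod/volume combination is not mounted. A minimal sketch (not part of the diff) of how a caller is expected to branch on that result, using the error predicates the reconciler later in this change relies on:

    // mounted reports whether the pod's volume is mounted; the error, when
    // non-nil, classifies the reason it is not usable yet.
    mounted, err := asw.PodExistsInVolume(podName, volumeName)
    switch {
    case cache.IsVolumeNotAttachedError(err):
        // Volume is not attached to this node; it must be attached (or
        // marked attached) before it can be mounted.
    case cache.IsRemountRequiredError(err):
        // Volume is mounted for this pod, but the pod was updated and the
        // volume must be remounted (e.g. Downward API volumes).
    case err != nil:
        // Unexpected failure.
    case !mounted:
        // Volume is attached but not yet mounted for this pod.
    }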
286 pkg/kubelet/volume/cache/desired_state_of_world.go vendored Normal file
@@ -0,0 +1,286 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Package cache implements data structures used by the kubelet volume manager to
keep track of attached volumes and the pods that mounted them.
*/
package cache

import (
    "fmt"
    "sync"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// DesiredStateOfWorld defines a set of thread-safe operations for the kubelet
// volume manager's desired state of the world cache.
// This cache contains volumes->pods i.e. a set of all volumes that should be
// attached to this node and the pods that reference them and should mount the
// volume.
// Note: This is distinct from the DesiredStateOfWorld implemented by the
// attach/detach controller. They both keep track of different objects. This
// contains kubelet volume manager specific state.
type DesiredStateOfWorld interface {
    // AddPodToVolume adds the given pod to the given volume in the cache
    // indicating the specified pod should mount the specified volume.
    // A unique volumeName is generated from the volumeSpec and returned on
    // success.
    // If no volume plugin can support the given volumeSpec or more than one
    // plugin can support it, an error is returned.
    // If a volume with the name volumeName does not exist in the list of
    // volumes that should be attached to this node, the volume is implicitly
    // added.
    // If a pod with the same unique name already exists under the specified
    // volume, this is a no-op.
    AddPodToVolume(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, outerVolumeSpecName string, volumeGidValue string) (api.UniqueVolumeName, error)

    // DeletePodFromVolume removes the given pod from the given volume in the
    // cache indicating the specified pod no longer requires the specified
    // volume.
    // If a pod with the same unique name does not exist under the specified
    // volume, this is a no-op.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, this is a no-op.
    // If after deleting the pod, the specified volume contains no other child
    // pods, the volume is also deleted.
    DeletePodFromVolume(podName types.UniquePodName, volumeName api.UniqueVolumeName)

    // VolumeExists returns true if the given volume exists in the list of
    // volumes that should be attached to this node.
    // If a volume with the name volumeName does not exist in that list, false
    // is returned.
    VolumeExists(volumeName api.UniqueVolumeName) bool

    // PodExistsInVolume returns true if the given pod exists in the list of
    // podsToMount for the given volume in the cache.
    // If a pod with the same unique name does not exist under the specified
    // volume, false is returned.
    // If a volume with the name volumeName does not exist in the list of
    // attached volumes, false is returned.
    PodExistsInVolume(podName types.UniquePodName, volumeName api.UniqueVolumeName) bool

    // GetVolumesToMount generates and returns a list of volumes that should be
    // attached to this node and the pods they should be mounted to based on the
    // current desired state of the world.
    GetVolumesToMount() []VolumeToMount
}

// VolumeToMount represents a volume that should be attached to this node and
// mounted to the PodName.
type VolumeToMount struct {
    operationexecutor.VolumeToMount
}

// NewDesiredStateOfWorld returns a new instance of DesiredStateOfWorld.
func NewDesiredStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) DesiredStateOfWorld {
    return &desiredStateOfWorld{
        volumesToMount:  make(map[api.UniqueVolumeName]volumeToMount),
        volumePluginMgr: volumePluginMgr,
    }
}

type desiredStateOfWorld struct {
    // volumesToMount is a map containing the set of volumes that should be
    // attached to this node and mounted to the pods referencing it. The key in
    // the map is the name of the volume and the value is a volume object
    // containing more information about the volume.
    volumesToMount map[api.UniqueVolumeName]volumeToMount
    // volumePluginMgr is the volume plugin manager used to create volume
    // plugin objects.
    volumePluginMgr *volume.VolumePluginMgr
    sync.RWMutex
}

// The volume object represents a volume that should be attached to this node,
// and mounted to podsToMount.
type volumeToMount struct {
    // volumeName contains the unique identifier for this volume.
    volumeName api.UniqueVolumeName

    // podsToMount is a map containing the set of pods that reference this
    // volume and should mount it once it is attached. The key in the map is
    // the name of the pod and the value is a pod object containing more
    // information about the pod.
    podsToMount map[types.UniquePodName]podToMount

    // pluginIsAttachable indicates that the plugin for this volume implements
    // the volume.Attacher interface
    pluginIsAttachable bool

    // volumeGidValue contains the value of the GID annotation, if present.
    volumeGidValue string
}

// The pod object represents a pod that references the underlying volume and
// should mount it once it is attached.
type podToMount struct {
    // podName contains the name of this pod.
    podName types.UniquePodName

    // Pod to mount the volume to. Used to create NewMounter.
    pod *api.Pod

    // volume spec containing the specification for this volume. Used to
    // generate the volume plugin object, and passed to plugin methods.
    // For non-PVC volumes this is the same as defined in the pod object. For
    // PVC volumes it is from the dereferenced PV object.
    spec *volume.Spec

    // outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced
    // directly in the pod. If the volume was referenced through a persistent
    // volume claim, this contains the volume.Spec.Name() of the persistent
    // volume claim
    outerVolumeSpecName string
}

func (dsw *desiredStateOfWorld) AddPodToVolume(
    podName types.UniquePodName,
    pod *api.Pod,
    volumeSpec *volume.Spec,
    outerVolumeSpecName string,
    volumeGidValue string) (api.UniqueVolumeName, error) {
    dsw.Lock()
    defer dsw.Unlock()

    volumePlugin, err := dsw.volumePluginMgr.FindPluginBySpec(volumeSpec)
    if err != nil || volumePlugin == nil {
        return "", fmt.Errorf(
            "failed to get Plugin from volumeSpec for volume %q err=%v",
            volumeSpec.Name(),
            err)
    }

    volumeName, err :=
        volumehelper.GetUniqueVolumeNameFromSpec(volumePlugin, volumeSpec)
    if err != nil {
        return "", fmt.Errorf(
            "failed to GetUniqueVolumeNameFromSpec for volumeSpec %q using volume plugin %q err=%v",
            volumeSpec.Name(),
            volumePlugin.GetPluginName(),
            err)
    }

    volumeObj, volumeExists := dsw.volumesToMount[volumeName]
    if !volumeExists {
        volumeObj = volumeToMount{
            volumeName:         volumeName,
            podsToMount:        make(map[types.UniquePodName]podToMount),
            pluginIsAttachable: dsw.isAttachableVolume(volumeSpec),
            volumeGidValue:     volumeGidValue,
        }
        dsw.volumesToMount[volumeName] = volumeObj
    }

    // Create new podToMount object. If it already exists, it is refreshed with
    // updated values (this is required for volumes that require remounting on
    // pod update, like Downward API volumes).
    dsw.volumesToMount[volumeName].podsToMount[podName] = podToMount{
        podName:             podName,
        pod:                 pod,
        spec:                volumeSpec,
        outerVolumeSpecName: outerVolumeSpecName,
    }

    return volumeName, nil
}

func (dsw *desiredStateOfWorld) DeletePodFromVolume(
    podName types.UniquePodName, volumeName api.UniqueVolumeName) {
    dsw.Lock()
    defer dsw.Unlock()

    volumeObj, volumeExists := dsw.volumesToMount[volumeName]
    if !volumeExists {
        return
    }

    if _, podExists := volumeObj.podsToMount[podName]; !podExists {
        return
    }

    // Delete pod if it exists
    delete(dsw.volumesToMount[volumeName].podsToMount, podName)

    if len(dsw.volumesToMount[volumeName].podsToMount) == 0 {
        // Delete volume if no child pods left
        delete(dsw.volumesToMount, volumeName)
    }
}

func (dsw *desiredStateOfWorld) VolumeExists(
    volumeName api.UniqueVolumeName) bool {
    dsw.RLock()
    defer dsw.RUnlock()

    _, volumeExists := dsw.volumesToMount[volumeName]
    return volumeExists
}

func (dsw *desiredStateOfWorld) PodExistsInVolume(
    podName types.UniquePodName, volumeName api.UniqueVolumeName) bool {
    dsw.RLock()
    defer dsw.RUnlock()

    volumeObj, volumeExists := dsw.volumesToMount[volumeName]
    if !volumeExists {
        return false
    }

    _, podExists := volumeObj.podsToMount[podName]
    return podExists
}

func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount {
    dsw.RLock()
    defer dsw.RUnlock()

    volumesToMount := make([]VolumeToMount, 0 /* len */, len(dsw.volumesToMount) /* cap */)
    for volumeName, volumeObj := range dsw.volumesToMount {
        for podName, podObj := range volumeObj.podsToMount {
            volumesToMount = append(
                volumesToMount,
                VolumeToMount{
                    VolumeToMount: operationexecutor.VolumeToMount{
                        VolumeName:          volumeName,
                        PodName:             podName,
                        Pod:                 podObj.pod,
                        VolumeSpec:          podObj.spec,
                        PluginIsAttachable:  volumeObj.pluginIsAttachable,
                        OuterVolumeSpecName: podObj.outerVolumeSpecName,
                        VolumeGidValue:      volumeObj.volumeGidValue}})
        }
    }
    return volumesToMount
}

func (dsw *desiredStateOfWorld) isAttachableVolume(volumeSpec *volume.Spec) bool {
    attachableVolumePlugin, _ :=
        dsw.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
    if attachableVolumePlugin != nil {
        volumeAttacher, err := attachableVolumePlugin.NewAttacher()
        if err == nil && volumeAttacher != nil {
            return true
        }
    }

    return false
}
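To make the interface above concrete, here is a minimal usage sketch (not part of the diff; volumePluginMgr, podName, pod, and volumeSpec are assumed to come from the caller, as they do in the populator later in this change):

    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)

    // Register the pod's interest in the volume; the returned name uniquely
    // identifies the volume across plugins.
    volumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
    if err != nil {
        // No volume plugin (or more than one) supports this spec.
    }

    // Each returned entry pairs one volume with one pod that should mount it.
    for _, vtm := range dsw.GetVolumesToMount() {
        _ = vtm.PluginIsAttachable // tells the reconciler whether attach is needed
    }

    // Removing the last pod also removes the volume from the desired state.
    dsw.DeletePodFromVolume(podName, volumeName)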
234 pkg/kubelet/volume/cache/desired_state_of_world_test.go vendored Normal file
@@ -0,0 +1,234 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache

import (
    "testing"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/volume"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// Calls AddPodToVolume() to add a new pod to a new volume
// Verifies the newly added pod/volume exists via
// PodExistsInVolume(), VolumeExists(), and GetVolumesToMount()
func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "pod3",
            UID:  "pod3uid",
        },
        Spec: api.PodSpec{
            Volumes: []api.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: api.VolumeSource{
                        GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)

    // Act
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExists(t, generatedVolumeName, dsw)
    verifyVolumeExistsInVolumesToMount(t, generatedVolumeName, dsw)
    verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
}

// Calls AddPodToVolume() twice to add the same pod to the same volume
// Verifies the newly added pod/volume exists via
// PodExistsInVolume(), VolumeExists(), and GetVolumesToMount() with no errors.
func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "pod3",
            UID:  "pod3uid",
        },
        Spec: api.PodSpec{
            Volumes: []api.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: api.VolumeSource{
                        GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)

    // Act
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    verifyVolumeExists(t, generatedVolumeName, dsw)
    verifyVolumeExistsInVolumesToMount(t, generatedVolumeName, dsw)
    verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)
}

// Populates data struct with a new volume/pod
// Calls DeletePodFromVolume() to remove the pod
// Verifies the newly added pod/volume are deleted
func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "pod3",
            UID:  "pod3uid",
        },
        Spec: api.PodSpec{
            Volumes: []api.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: api.VolumeSource{
                        GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }
    verifyVolumeExists(t, generatedVolumeName, dsw)
    verifyVolumeExistsInVolumesToMount(t, generatedVolumeName, dsw)
    verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, dsw)

    // Act
    dsw.DeletePodFromVolume(podName, generatedVolumeName)

    // Assert
    verifyVolumeDoesntExist(t, generatedVolumeName, dsw)
    verifyVolumeDoesntExistInVolumesToMount(t, generatedVolumeName, dsw)
    verifyPodDoesntExistInVolumeDsw(t, podName, generatedVolumeName, dsw)
}

func verifyVolumeExists(
    t *testing.T, expectedVolumeName api.UniqueVolumeName, dsw DesiredStateOfWorld) {
    volumeExists := dsw.VolumeExists(expectedVolumeName)
    if !volumeExists {
        t.Fatalf(
            "VolumeExists(%q) failed. Expected: <true> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }
}

func verifyVolumeDoesntExist(
    t *testing.T, expectedVolumeName api.UniqueVolumeName, dsw DesiredStateOfWorld) {
    volumeExists := dsw.VolumeExists(expectedVolumeName)
    if volumeExists {
        t.Fatalf(
            "VolumeExists(%q) returned incorrect value. Expected: <false> Actual: <%v>",
            expectedVolumeName,
            volumeExists)
    }
}

func verifyVolumeExistsInVolumesToMount(
    t *testing.T, expectedVolumeName api.UniqueVolumeName, dsw DesiredStateOfWorld) {
    volumesToMount := dsw.GetVolumesToMount()
    for _, volume := range volumesToMount {
        if volume.VolumeName == expectedVolumeName {
            return
        }
    }

    t.Fatalf(
        "Could not find volume %v in the list of desired state of world volumes to mount %+v",
        expectedVolumeName,
        volumesToMount)
}

func verifyVolumeDoesntExistInVolumesToMount(
    t *testing.T, volumeToCheck api.UniqueVolumeName, dsw DesiredStateOfWorld) {
    volumesToMount := dsw.GetVolumesToMount()
    for _, volume := range volumesToMount {
        if volume.VolumeName == volumeToCheck {
            t.Fatalf(
                "Found volume %v in the list of desired state of world volumes to mount. Expected it not to exist.",
                volumeToCheck)
        }
    }
}

func verifyPodExistsInVolumeDsw(
    t *testing.T,
    expectedPodName volumetypes.UniquePodName,
    expectedVolumeName api.UniqueVolumeName,
    dsw DesiredStateOfWorld) {
    if podExistsInVolume := dsw.PodExistsInVolume(
        expectedPodName, expectedVolumeName); !podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
            podExistsInVolume)
    }
}

func verifyPodDoesntExistInVolumeDsw(
    t *testing.T,
    expectedPodName volumetypes.UniquePodName,
    expectedVolumeName api.UniqueVolumeName,
    dsw DesiredStateOfWorld) {
    if podExistsInVolume := dsw.PodExistsInVolume(
        expectedPodName, expectedVolumeName); podExistsInVolume {
        t.Fatalf(
            "DSW PodExistsInVolume returned incorrect value. Expected: <false> Actual: <%v>",
            podExistsInVolume)
    }
}
346 pkg/kubelet/volume/populator/desired_state_of_world_populator.go Normal file
@@ -0,0 +1,346 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Package populator implements interfaces that monitor and keep the states of the
caches in sync with the "ground truth".
*/
package populator

import (
    "fmt"
    "sync"
    "time"

    "github.com/golang/glog"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/kubelet/pod"
    "k8s.io/kubernetes/pkg/kubelet/volume/cache"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/pkg/volume"
    volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// DesiredStateOfWorldPopulator periodically loops through the list of active
// pods and ensures that each one exists in the desired state of the world
// cache if it has volumes. It also verifies that the pods in the desired state
// of the world cache still exist and, if not, removes them.
type DesiredStateOfWorldPopulator interface {
    Run(stopCh <-chan struct{})

    // ReprocessPod removes the specified pod from the list of processedPods
    // (if it exists) forcing it to be reprocessed. This is required to enable
    // remounting volumes on pod updates (volumes like Downward API volumes
    // depend on this behavior to ensure volume content is updated).
    ReprocessPod(podName volumetypes.UniquePodName)
}

// NewDesiredStateOfWorldPopulator returns a new instance of
// DesiredStateOfWorldPopulator.
//
// kubeClient - used to fetch PV and PVC objects from the API server
// loopSleepDuration - the amount of time the populator loop sleeps between
//     successive executions
// podManager - the kubelet podManager that is the source of truth for the pods
//     that exist on this host
// desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator(
    kubeClient internalclientset.Interface,
    loopSleepDuration time.Duration,
    podManager pod.Manager,
    desiredStateOfWorld cache.DesiredStateOfWorld) DesiredStateOfWorldPopulator {
    return &desiredStateOfWorldPopulator{
        kubeClient:          kubeClient,
        loopSleepDuration:   loopSleepDuration,
        podManager:          podManager,
        desiredStateOfWorld: desiredStateOfWorld,
        pods: processedPods{
            processedPods: make(map[volumetypes.UniquePodName]bool)},
    }
}

type desiredStateOfWorldPopulator struct {
    kubeClient          internalclientset.Interface
    loopSleepDuration   time.Duration
    podManager          pod.Manager
    desiredStateOfWorld cache.DesiredStateOfWorld
    pods                processedPods
}

type processedPods struct {
    processedPods map[volumetypes.UniquePodName]bool
    sync.RWMutex
}

func (dswp *desiredStateOfWorldPopulator) Run(stopCh <-chan struct{}) {
    wait.Until(dswp.populatorLoopFunc(), dswp.loopSleepDuration, stopCh)
}

func (dswp *desiredStateOfWorldPopulator) ReprocessPod(
    podName volumetypes.UniquePodName) {
    dswp.deleteProcessedPod(podName)
}

func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
    return func() {
        dswp.findAndAddNewPods()

        dswp.findAndRemoveDeletedPods()
    }
}

// Iterate through all pods and add to desired state of world if they don't
// exist but should
func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() {
    for _, pod := range dswp.podManager.GetPods() {
        dswp.processPodVolumes(pod)
    }
}

// Iterate through all pods in desired state of world, and remove if they no
// longer exist
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
    for _, volumeToMount := range dswp.desiredStateOfWorld.GetVolumesToMount() {
        if _, podExists :=
            dswp.podManager.GetPodByUID(volumeToMount.Pod.UID); !podExists {
            glog.V(10).Infof(
                "Removing volume %q (volSpec=%q) for pod %q from desired state.",
                volumeToMount.VolumeName,
                volumeToMount.VolumeSpec.Name(),
                volumeToMount.PodName)

            dswp.desiredStateOfWorld.DeletePodFromVolume(
                volumeToMount.PodName, volumeToMount.VolumeName)
            dswp.deleteProcessedPod(volumeToMount.PodName)
        }
    }
}

// processPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world.
func (dswp *desiredStateOfWorldPopulator) processPodVolumes(pod *api.Pod) {
    if pod == nil {
        return
    }

    uniquePodName := volumehelper.GetUniquePodName(pod)
    if dswp.podPreviouslyProcessed(uniquePodName) {
        return
    }

    // Process volume spec for each volume defined in pod
    for _, podVolume := range pod.Spec.Volumes {
        volumeSpec, volumeGidValue, err :=
            dswp.createVolumeSpec(podVolume, pod.Namespace)
        if err != nil {
            glog.Errorf(
                "Error processing volume %q for pod %q/%q: %v",
                podVolume.Name,
                pod.Namespace,
                pod.Name,
                err)
            continue
        }

        // Add volume to desired state of world
        _, err = dswp.desiredStateOfWorld.AddPodToVolume(
            uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue)
        if err != nil {
            glog.Errorf(
                "Failed to add volume %q (specName: %q) for pod %q to desiredStateOfWorld. err=%v",
                podVolume.Name,
                volumeSpec.Name(),
                uniquePodName,
                err)
        }

        glog.V(10).Infof(
            "Added volume %q (volSpec=%q) for pod %q to desired state.",
            podVolume.Name,
            volumeSpec.Name(),
            uniquePodName)
    }

    dswp.markPodProcessed(uniquePodName)
}

// podPreviouslyProcessed returns true if the volumes for this pod have already
// been processed by the populator
func (dswp *desiredStateOfWorldPopulator) podPreviouslyProcessed(
    podName volumetypes.UniquePodName) bool {
    dswp.pods.RLock()
    defer dswp.pods.RUnlock()

    _, exists := dswp.pods.processedPods[podName]
    return exists
}

// markPodProcessed records that the volumes for the specified pod have been
// processed by the populator
func (dswp *desiredStateOfWorldPopulator) markPodProcessed(
    podName volumetypes.UniquePodName) {
    dswp.pods.Lock()
    defer dswp.pods.Unlock()

    dswp.pods.processedPods[podName] = true
}

// deleteProcessedPod removes the specified pod from processedPods
func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
    podName volumetypes.UniquePodName) {
    dswp.pods.Lock()
    defer dswp.pods.Unlock()

    delete(dswp.pods.processedPods, podName)
}

// createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get the PV object, if needed.
func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
    podVolume api.Volume, podNamespace string) (*volume.Spec, string, error) {
    if pvcSource :=
        podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
        glog.V(10).Infof(
            "Found PVC, ClaimName: %q/%q",
            podNamespace,
            pvcSource.ClaimName)

        // If podVolume is a PVC, fetch the real PV behind the claim
        pvName, pvcUID, err := dswp.getPVCExtractPV(
            podNamespace, pvcSource.ClaimName)
        if err != nil {
            return nil, "", fmt.Errorf(
                "error processing PVC %q/%q: %v",
                podNamespace,
                pvcSource.ClaimName,
                err)
        }

        glog.V(10).Infof(
            "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
            podNamespace,
            pvcSource.ClaimName,
            pvcUID,
            pvName)

        // Fetch actual PV object
        volumeSpec, volumeGidValue, err :=
            dswp.getPVSpec(pvName, pvcSource.ReadOnly, pvcUID)
        if err != nil {
            return nil, "", fmt.Errorf(
                "error processing PVC %q/%q: %v",
                podNamespace,
                pvcSource.ClaimName,
                err)
        }

        glog.V(10).Infof(
            "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
            volumeSpec.Name(),
            pvName,
            podNamespace,
            pvcSource.ClaimName,
            pvcUID)

        return volumeSpec, volumeGidValue, nil
    }

    // Do not return the original volume object, since the source could mutate it
    clonedPodVolumeObj, err := api.Scheme.DeepCopy(podVolume)
    if err != nil || clonedPodVolumeObj == nil {
        return nil, "", fmt.Errorf(
            "failed to deep copy %q volume object. err=%v", podVolume.Name, err)
    }

    clonedPodVolume, ok := clonedPodVolumeObj.(api.Volume)
    if !ok {
        return nil, "", fmt.Errorf(
            "failed to cast clonedPodVolume %#v to api.Volume",
            clonedPodVolumeObj)
    }

    return volume.NewSpecFromVolume(&clonedPodVolume), "", nil
}

// getPVCExtractPV fetches the PVC object with the given namespace and name
// from the API server, extracts the name of the PV it is pointing to, and
// returns it.
// An error is returned if the PVC object's phase is not "Bound".
func (dswp *desiredStateOfWorldPopulator) getPVCExtractPV(
    namespace string, claimName string) (string, types.UID, error) {
    pvc, err :=
        dswp.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName)
    if err != nil || pvc == nil {
        return "", "", fmt.Errorf(
            "failed to fetch PVC %s/%s from API server. err=%v",
            namespace,
            claimName,
            err)
    }

    if pvc.Status.Phase != api.ClaimBound || pvc.Spec.VolumeName == "" {
        return "", "", fmt.Errorf(
            "PVC %s/%s has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
            namespace,
            claimName,
            pvc.Status.Phase,
            pvc.Spec.VolumeName)
    }

    return pvc.Spec.VolumeName, pvc.UID, nil
}

// getPVSpec fetches the PV object with the given name from the API server
// and returns a volume.Spec representing it.
// An error is returned if the call to fetch the PV object fails.
func (dswp *desiredStateOfWorldPopulator) getPVSpec(
    name string,
    pvcReadOnly bool,
    expectedClaimUID types.UID) (*volume.Spec, string, error) {
    pv, err := dswp.kubeClient.Core().PersistentVolumes().Get(name)
    if err != nil || pv == nil {
        return nil, "", fmt.Errorf(
            "failed to fetch PV %q from API server. err=%v", name, err)
    }

    if pv.Spec.ClaimRef == nil {
        return nil, "", fmt.Errorf(
            "found PV object %q but it has a nil pv.Spec.ClaimRef indicating it is not yet bound to the claim",
            name)
    }

    if pv.Spec.ClaimRef.UID != expectedClaimUID {
        return nil, "", fmt.Errorf(
            "found PV object %q but its pv.Spec.ClaimRef.UID (%q) does not point to claim.UID (%q)",
            name,
            pv.Spec.ClaimRef.UID,
            expectedClaimUID)
    }

    volumeGidValue := getPVVolumeGidAnnotationValue(pv)
    return volume.NewSpecFromPersistentVolume(pv, pvcReadOnly), volumeGidValue, nil
}

func getPVVolumeGidAnnotationValue(pv *api.PersistentVolume) string {
    if volumeGid, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]; ok {
        return volumeGid
    }

    return ""
}
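A minimal wiring sketch (not part of the diff) of how the populator is intended to be started; kubeClient, volumePluginMgr, podManager, and updatedPod are assumed to exist, and the loop duration is an illustrative value:

    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    dswp := populator.NewDesiredStateOfWorldPopulator(
        kubeClient,           // used only to dereference PVCs/PVs
        100*time.Millisecond, // loopSleepDuration (illustrative value)
        podManager,           // kubelet's source of truth for pods on this host
        dsw)

    stopCh := make(chan struct{})
    go dswp.Run(stopCh) // loops: findAndAddNewPods, then findAndRemoveDeletedPods

    // On a pod update that requires a remount (e.g. Downward API volumes),
    // force the pod's volumes to be reprocessed on the next loop iteration:
    dswp.ReprocessPod(volumehelper.GetUniquePodName(updatedPod))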
295 pkg/kubelet/volume/reconciler/reconciler.go Normal file
@@ -0,0 +1,295 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package reconciler implements interfaces that attempt to reconcile the
|
||||||
|
// desired state of the with the actual state of the world by triggering
|
||||||
|
// relevant actions (attach, detach, mount, unmount).
|
||||||
|
package reconciler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
"k8s.io/kubernetes/pkg/kubelet/volume/cache"
|
||||||
|
"k8s.io/kubernetes/pkg/util/goroutinemap"
|
||||||
|
"k8s.io/kubernetes/pkg/util/wait"
|
||||||
|
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Reconciler runs a periodic loop to reconcile the desired state of the world
|
||||||
|
// with the actual state of the world by triggering attach, detach, mount, and
|
||||||
|
// unmount operations.
|
||||||
|
// Note: This is distinct from the Reconciler implemented by the attach/detach
|
||||||
|
// controller. This reconciles state for the kubelet volume manager. That
|
||||||
|
// reconciles state for the attach/detach controller.
|
||||||
|
type Reconciler interface {
|
||||||
|
// Starts running the reconciliation loop which executes periodically, checks
|
||||||
|
// if volumes that should be mounted are mounted and volumes that should
|
||||||
|
// be unmounted are unmounted. If not, it will trigger mount/unmount
|
||||||
|
// operations to rectify.
|
||||||
|
// If attach/detach management is enabled, the manager will also check if
|
||||||
|
// volumes that should be attached are attached and volumes that should
|
||||||
|
// be detached are detached and trigger attach/detach operations as needed.
|
||||||
|
Run(stopCh <-chan struct{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewReconciler returns a new instance of Reconciler.
|
||||||
|
//
|
||||||
|
// controllerAttachDetachEnabled - if true, indicates that the attach/detach
|
||||||
|
// controller is responsible for managing the attach/detach operations for
|
||||||
|
// this node, and therefore the volume manager should not
|
||||||
|
// loopSleepDuration - the amount of time the reconciler loop sleeps between
|
||||||
|
// successive executions
|
||||||
|
// waitForAttachTimeout - the amount of time the Mount function will wait for
|
||||||
|
// the volume to be attached
|
||||||
|
// hostName - the hostname for this node, used by Attach and Detach methods
|
||||||
|
// desiredStateOfWorld - cache containing the desired state of the world
|
||||||
|
// actualStateOfWorld - cache containing the actual state of the world
|
||||||
|
// operationExecutor - used to trigger attach/detach/mount/unmount operations
|
||||||
|
// safely (prevents more than one operation from being triggered on the same
|
||||||
|
// volume)
|
||||||
|
func NewReconciler(
|
||||||
|
controllerAttachDetachEnabled bool,
|
||||||
|
loopSleepDuration time.Duration,
|
||||||
|
waitForAttachTimeout time.Duration,
|
||||||
|
hostName string,
|
||||||
|
desiredStateOfWorld cache.DesiredStateOfWorld,
|
||||||
|
actualStateOfWorld cache.ActualStateOfWorld,
|
||||||
|
operationExecutor operationexecutor.OperationExecutor) Reconciler {
|
||||||
|
return &reconciler{
|
||||||
|
controllerAttachDetachEnabled: controllerAttachDetachEnabled,
|
||||||
|
loopSleepDuration: loopSleepDuration,
|
||||||
|
waitForAttachTimeout: waitForAttachTimeout,
|
||||||
|
hostName: hostName,
|
||||||
|
desiredStateOfWorld: desiredStateOfWorld,
|
||||||
|
actualStateOfWorld: actualStateOfWorld,
|
||||||
|
operationExecutor: operationExecutor,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type reconciler struct {
|
||||||
|
controllerAttachDetachEnabled bool
|
||||||
|
loopSleepDuration time.Duration
|
||||||
|
waitForAttachTimeout time.Duration
|
||||||
|
hostName string
|
||||||
|
desiredStateOfWorld cache.DesiredStateOfWorld
|
||||||
|
actualStateOfWorld cache.ActualStateOfWorld
|
||||||
|
operationExecutor operationexecutor.OperationExecutor
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rc *reconciler) Run(stopCh <-chan struct{}) {
|
||||||
|
wait.Until(rc.reconciliationLoopFunc(), rc.loopSleepDuration, stopCh)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rc *reconciler) reconciliationLoopFunc() func() {
|
||||||
|
return func() {
|
||||||
|
// Unmounts are triggered before mounts so that a volume that was
|
||||||
|
// referenced by a pod that was deleted and is now referenced by another
|
||||||
|
// pod is unmounted from the first pod before being mounted to the new
|
||||||
|
// pod.
|
||||||
|
|
||||||
|
// Ensure volumes that should be unmounted are unmounted.
|
||||||
|
for _, mountedVolume := range rc.actualStateOfWorld.GetMountedVolumes() {
|
||||||
|
if !rc.desiredStateOfWorld.PodExistsInVolume(mountedVolume.PodName, mountedVolume.VolumeName) {
|
||||||
|
// Volume is mounted, unmount it
|
||||||
|
glog.V(12).Infof("Attempting to start UnmountVolume for volume %q (spec.Name: %q) from pod %q (UID: %q).",
|
||||||
|
mountedVolume.VolumeName,
|
||||||
|
mountedVolume.OuterVolumeSpecName,
|
||||||
|
mountedVolume.PodName,
|
||||||
|
mountedVolume.PodUID)
|
||||||
|
err := rc.operationExecutor.UnmountVolume(
|
||||||
|
mountedVolume.MountedVolume, rc.actualStateOfWorld)
|
||||||
|
if err != nil && !goroutinemap.IsAlreadyExists(err) {
|
||||||
|
// Ignore goroutinemap.IsAlreadyExists errors, they are expected.
|
||||||
|
// Log all other errors.
|
||||||
|
glog.Errorf(
|
||||||
|
"operationExecutor.UnmountVolume failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v",
|
||||||
|
mountedVolume.VolumeName,
|
||||||
|
mountedVolume.OuterVolumeSpecName,
|
||||||
|
mountedVolume.PodName,
|
||||||
|
mountedVolume.PodUID,
|
||||||
|
rc.controllerAttachDetachEnabled,
|
||||||
|
err)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
glog.Infof("UnmountVolume operation started for volume %q (spec.Name: %q) from pod %q (UID: %q).",
|
||||||
|
mountedVolume.VolumeName,
|
||||||
|
mountedVolume.OuterVolumeSpecName,
|
||||||
|
mountedVolume.PodName,
|
||||||
|
mountedVolume.PodUID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure volumes that should be attached/mounted are attached/mounted.
|
||||||
|
for _, volumeToMount := range rc.desiredStateOfWorld.GetVolumesToMount() {
|
||||||
|
volMounted, err := rc.actualStateOfWorld.PodExistsInVolume(volumeToMount.PodName, volumeToMount.VolumeName)
|
||||||
|
if cache.IsVolumeNotAttachedError(err) {
|
||||||
|
// Volume is not attached, it should be
|
||||||
|
if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
|
||||||
|
// Kubelet not responsible for attaching or this volume has a non-attachable volume plugin,
|
||||||
|
// so just add it to actualStateOfWorld without attach.
|
||||||
|
markVolumeAttachErr := rc.actualStateOfWorld.MarkVolumeAsAttached(
|
||||||
|
volumeToMount.VolumeSpec, rc.hostName)
|
||||||
|
if markVolumeAttachErr != nil {
|
||||||
|
glog.Errorf(
|
||||||
|
"actualStateOfWorld.MarkVolumeAsAttached failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v",
|
||||||
|
volumeToMount.VolumeName,
|
||||||
|
volumeToMount.VolumeSpec.Name(),
|
||||||
|
volumeToMount.PodName,
|
||||||
|
volumeToMount.Pod.UID,
|
||||||
|
rc.controllerAttachDetachEnabled,
|
||||||
|
markVolumeAttachErr)
|
||||||
|
} else {
|
||||||
|
glog.V(12).Infof("actualStateOfWorld.MarkVolumeAsAttached succeeded for volume %q (spec.Name: %q) pod %q (UID: %q)",
|
||||||
|
                            volumeToMount.VolumeName,
                            volumeToMount.VolumeSpec.Name(),
                            volumeToMount.PodName,
                            volumeToMount.Pod.UID)
                    }
                } else {
                    // Volume is not attached to node, kubelet attach is enabled, volume
                    // implements an attacher, so attach it
                    volumeToAttach := operationexecutor.VolumeToAttach{
                        VolumeName: volumeToMount.VolumeName,
                        VolumeSpec: volumeToMount.VolumeSpec,
                        NodeName:   rc.hostName,
                    }
                    glog.V(12).Infof("Attempting to start AttachVolume for volume %q (spec.Name: %q) pod %q (UID: %q)",
                        volumeToMount.VolumeName,
                        volumeToMount.VolumeSpec.Name(),
                        volumeToMount.PodName,
                        volumeToMount.Pod.UID)
                    err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
                    if err != nil && !goroutinemap.IsAlreadyExists(err) {
                        // Ignore goroutinemap.IsAlreadyExists errors, they are expected.
                        // Log all other errors.
                        glog.Errorf(
                            "operationExecutor.AttachVolume failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v",
                            volumeToMount.VolumeName,
                            volumeToMount.VolumeSpec.Name(),
                            volumeToMount.PodName,
                            volumeToMount.Pod.UID,
                            rc.controllerAttachDetachEnabled,
                            err)
                    }
                    if err == nil {
                        glog.Infof("AttachVolume operation started for volume %q (spec.Name: %q) pod %q (UID: %q)",
                            volumeToMount.VolumeName,
                            volumeToMount.VolumeSpec.Name(),
                            volumeToMount.PodName,
                            volumeToMount.Pod.UID)
                    }
                }
            } else if !volMounted || cache.IsRemountRequiredError(err) {
                // Volume is not mounted, or is already mounted but requires remounting
                remountingLogStr := ""
                if cache.IsRemountRequiredError(err) {
                    remountingLogStr = "Volume is already mounted to pod, but remount was requested."
                }
                glog.V(12).Infof("Attempting to start MountVolume for volume %q (spec.Name: %q) to pod %q (UID: %q). %s",
                    volumeToMount.VolumeName,
                    volumeToMount.VolumeSpec.Name(),
                    volumeToMount.PodName,
                    volumeToMount.Pod.UID,
                    remountingLogStr)
                err := rc.operationExecutor.MountVolume(
                    rc.waitForAttachTimeout,
                    volumeToMount.VolumeToMount,
                    rc.actualStateOfWorld)
                if err != nil && !goroutinemap.IsAlreadyExists(err) {
                    // Ignore goroutinemap.IsAlreadyExists errors, they are expected.
                    // Log all other errors.
                    glog.Errorf(
                        "operationExecutor.MountVolume failed for volume %q (spec.Name: %q) pod %q (UID: %q) controllerAttachDetachEnabled: %v with err: %v",
                        volumeToMount.VolumeName,
                        volumeToMount.VolumeSpec.Name(),
                        volumeToMount.PodName,
                        volumeToMount.Pod.UID,
                        rc.controllerAttachDetachEnabled,
                        err)
                }
                if err == nil {
                    glog.Infof("MountVolume operation started for volume %q (spec.Name: %q) to pod %q (UID: %q). %s",
                        volumeToMount.VolumeName,
                        volumeToMount.VolumeSpec.Name(),
                        volumeToMount.PodName,
                        volumeToMount.Pod.UID,
                        remountingLogStr)
                }
            }
        }

        // Ensure devices that should be detached/unmounted are detached/unmounted.
        for _, attachedVolume := range rc.actualStateOfWorld.GetUnmountedVolumes() {
            if !rc.desiredStateOfWorld.VolumeExists(attachedVolume.VolumeName) {
                if attachedVolume.GloballyMounted {
                    // Volume is globally mounted to device, unmount it
                    glog.V(12).Infof("Attempting to start UnmountDevice for volume %q (spec.Name: %q)",
                        attachedVolume.VolumeName,
                        attachedVolume.VolumeSpec.Name())
                    err := rc.operationExecutor.UnmountDevice(
                        attachedVolume.AttachedVolume, rc.actualStateOfWorld)
                    if err != nil && !goroutinemap.IsAlreadyExists(err) {
                        // Ignore goroutinemap.IsAlreadyExists errors, they are expected.
                        // Log all other errors.
                        glog.Errorf(
                            "operationExecutor.UnmountDevice failed for volume %q (spec.Name: %q) controllerAttachDetachEnabled: %v with err: %v",
                            attachedVolume.VolumeName,
                            attachedVolume.VolumeSpec.Name(),
                            rc.controllerAttachDetachEnabled,
                            err)
                    }
                    if err == nil {
                        glog.Infof("UnmountDevice operation started for volume %q (spec.Name: %q)",
                            attachedVolume.VolumeName,
                            attachedVolume.VolumeSpec.Name())
                    }
                } else {
                    // Volume is attached to node, detach it
                    if rc.controllerAttachDetachEnabled || !attachedVolume.PluginIsAttachable {
                        // Kubelet is not responsible for detaching, or this volume has a
                        // non-attachable volume plugin, so just mark it detached in the
                        // actualStateOfWorld without triggering a detach operation.
                        rc.actualStateOfWorld.MarkVolumeAsDetached(
                            attachedVolume.VolumeName, rc.hostName)
                    } else {
                        // Only detach if kubelet detach is enabled
                        glog.V(12).Infof("Attempting to start DetachVolume for volume %q (spec.Name: %q)",
                            attachedVolume.VolumeName,
                            attachedVolume.VolumeSpec.Name())
                        err := rc.operationExecutor.DetachVolume(
                            attachedVolume.AttachedVolume, rc.actualStateOfWorld)
                        if err != nil && !goroutinemap.IsAlreadyExists(err) {
                            // Ignore goroutinemap.IsAlreadyExists errors, they are expected.
                            // Log all other errors.
                            glog.Errorf(
                                "operationExecutor.DetachVolume failed for volume %q (spec.Name: %q) controllerAttachDetachEnabled: %v with err: %v",
                                attachedVolume.VolumeName,
                                attachedVolume.VolumeSpec.Name(),
                                rc.controllerAttachDetachEnabled,
                                err)
                        }
                        if err == nil {
                            glog.Infof("DetachVolume operation started for volume %q (spec.Name: %q)",
                                attachedVolume.VolumeName,
                                attachedVolume.VolumeSpec.Name())
                        }
                    }
                }
            }
        }
    }
}
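The two loops above are the heart of the reconciler: every pass compares the desired state of the world against the actual state and issues attach/mount or unmount/detach operations for the difference, so a missed event is simply repaired on the next pass. A minimal, self-contained sketch of that level-triggered pattern (the state type and reconcile helper here are illustrative stand-ins, not the real cache interfaces):

package main

import (
    "fmt"
    "time"
)

// state is a hypothetical, simplified view of which volumes exist; the real
// caches are cache.DesiredStateOfWorld and cache.ActualStateOfWorld.
type state map[string]bool

// reconcile brings actual in line with desired: anything desired but not
// actual is "mounted"; anything actual but not desired is "unmounted".
func reconcile(desired, actual state) {
    for vol := range desired {
        if !actual[vol] {
            fmt.Println("mount", vol) // stands in for operationExecutor.MountVolume
            actual[vol] = true
        }
    }
    for vol := range actual {
        if !desired[vol] {
            fmt.Println("unmount", vol) // stands in for UnmountVolume/DetachVolume
            delete(actual, vol)
        }
    }
}

func main() {
    desired := state{"vol-a": true}
    actual := state{}
    for i := 0; i < 3; i++ {
        reconcile(desired, actual)
        time.Sleep(100 * time.Millisecond) // plays the role of loopSleepDuration
    }
}

Because each pass recomputes the full difference, the loop is idempotent: running it again when nothing has changed performs no operations.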
pkg/kubelet/volume/reconciler/reconciler_test.go (new file, 404 lines)
@@ -0,0 +1,404 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/kubelet/volume/cache"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/pkg/volume"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
    // reconcilerLoopSleepDuration is the amount of time the reconciler loop
    // waits between successive executions
    reconcilerLoopSleepDuration time.Duration = 0 * time.Millisecond

    // waitForAttachTimeout is the maximum amount of time a
    // operationexecutor.Mount call will wait for a volume to be attached.
    waitForAttachTimeout time.Duration = 1 * time.Second
)

// Calls Run()
// Verifies there are no calls to attach, detach, mount, unmount, etc.
func Test_Run_Positive_DoNothing(t *testing.T) {
    // Arrange
    nodeName := "myhostname"
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
    oex := operationexecutor.NewOperationExecutor(volumePluginMgr)
    reconciler := NewReconciler(
        false, /* controllerAttachDetachEnabled */
        reconcilerLoopSleepDuration,
        waitForAttachTimeout,
        nodeName,
        dsw,
        asw,
        oex)

    // Act
    go reconciler.Run(wait.NeverStop)

    // Assert
    assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroWaitForAttachCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroMountDeviceCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroSetUpCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there are attach/mount/etc. calls and no detach/unmount calls.
func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
    // Arrange
    nodeName := "myhostname"
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
    oex := operationexecutor.NewOperationExecutor(volumePluginMgr)
    reconciler := NewReconciler(
        false, /* controllerAttachDetachEnabled */
        reconcilerLoopSleepDuration,
        waitForAttachTimeout,
        nodeName,
        dsw,
        asw,
        oex)
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: api.PodSpec{
            Volumes: []api.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: api.VolumeSource{
                        GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    _, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    go reconciler.Run(wait.NeverStop)
    waitForAttach(t, fakePlugin, asw)

    // Assert
    assert.NoError(t, volumetesting.VerifyAttachCallCount(
        1 /* expectedAttachCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
        1 /* expectedWaitForAttachCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
        1 /* expectedMountDeviceCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifySetUpCallCount(
        1 /* expectedSetUpCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies there is one mount call and no unmount calls.
// Verifies there are no attach/detach calls.
func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
    // Arrange
    nodeName := "myhostname"
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
    oex := operationexecutor.NewOperationExecutor(volumePluginMgr)
    reconciler := NewReconciler(
        true, /* controllerAttachDetachEnabled */
        reconcilerLoopSleepDuration,
        waitForAttachTimeout,
        nodeName,
        dsw,
        asw,
        oex)
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: api.PodSpec{
            Volumes: []api.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: api.VolumeSource{
                        GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    _, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    go reconciler.Run(wait.NeverStop)
    waitForAttach(t, fakePlugin, asw)

    // Assert
    assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
    assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
        1 /* expectedWaitForAttachCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
        1 /* expectedMountDeviceCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifySetUpCallCount(
        1 /* expectedSetUpCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there is one attach/mount/etc. call and no detach calls.
// Deletes volume/pod from desired state of world.
// Verifies detach/unmount calls are issued.
func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
    // Arrange
    nodeName := "myhostname"
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
    oex := operationexecutor.NewOperationExecutor(volumePluginMgr)
    reconciler := NewReconciler(
        false, /* controllerAttachDetachEnabled */
        reconcilerLoopSleepDuration,
        waitForAttachTimeout,
        nodeName,
        dsw,
        asw,
        oex)
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: api.PodSpec{
            Volumes: []api.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: api.VolumeSource{
                        GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    go reconciler.Run(wait.NeverStop)
    waitForAttach(t, fakePlugin, asw)

    // Assert
    assert.NoError(t, volumetesting.VerifyAttachCallCount(
        1 /* expectedAttachCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
        1 /* expectedWaitForAttachCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
        1 /* expectedMountDeviceCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifySetUpCallCount(
        1 /* expectedSetUpCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

    // Act
    dsw.DeletePodFromVolume(podName, generatedVolumeName)
    waitForDetach(t, fakePlugin, asw)

    // Assert
    assert.NoError(t, volumetesting.VerifyTearDownCallCount(
        1 /* expectedTearDownCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyDetachCallCount(
        1 /* expectedDetachCallCount */, fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies one mount call is made and no unmount calls.
// Deletes volume/pod from desired state of world.
// Verifies one unmount call is made.
// Verifies there are no attach/detach calls made.
func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
    // Arrange
    nodeName := "myhostname"
    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
    asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
    oex := operationexecutor.NewOperationExecutor(volumePluginMgr)
    reconciler := NewReconciler(
        true, /* controllerAttachDetachEnabled */
        reconcilerLoopSleepDuration,
        waitForAttachTimeout,
        nodeName,
        dsw,
        asw,
        oex)
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "pod1",
            UID:  "pod1uid",
        },
        Spec: api.PodSpec{
            Volumes: []api.Volume{
                {
                    Name: "volume-name",
                    VolumeSource: api.VolumeSource{
                        GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
                            PDName: "fake-device1",
                        },
                    },
                },
            },
        },
    }

    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
    podName := volumehelper.GetUniquePodName(pod)
    generatedVolumeName, err := dsw.AddPodToVolume(
        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)

    // Assert
    if err != nil {
        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
    }

    // Act
    go reconciler.Run(wait.NeverStop)
    waitForAttach(t, fakePlugin, asw)

    // Assert
    assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
    assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
        1 /* expectedWaitForAttachCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
        1 /* expectedMountDeviceCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifySetUpCallCount(
        1 /* expectedSetUpCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

    // Act
    dsw.DeletePodFromVolume(podName, generatedVolumeName)
    waitForDetach(t, fakePlugin, asw)

    // Assert
    assert.NoError(t, volumetesting.VerifyTearDownCallCount(
        1 /* expectedTearDownCallCount */, fakePlugin))
    assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

func waitForAttach(
    t *testing.T,
    fakePlugin *volumetesting.FakeVolumePlugin,
    asw cache.ActualStateOfWorld) {
    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            mountedVolumes := asw.GetMountedVolumes()
            if len(mountedVolumes) > 0 {
                return true, nil
            }

            return false, nil
        },
    )

    if err != nil {
        t.Fatalf("Timed out waiting for len of asw.GetMountedVolumes() to become non-zero.")
    }
}

func waitForDetach(
    t *testing.T,
    fakePlugin *volumetesting.FakeVolumePlugin,
    asw cache.ActualStateOfWorld) {
    err := retryWithExponentialBackOff(
        time.Duration(5*time.Millisecond),
        func() (bool, error) {
            attachedVolumes := asw.GetAttachedVolumes()
            if len(attachedVolumes) == 0 {
                return true, nil
            }

            return false, nil
        },
    )

    if err != nil {
        t.Fatalf("Timed out waiting for len of asw.GetAttachedVolumes() to become zero.")
    }
}

func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
    backoff := wait.Backoff{
        Duration: initialDuration,
        Factor:   3,
        Jitter:   0,
        Steps:    6,
    }
    return wait.ExponentialBackoff(backoff, fn)
}
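The retryWithExponentialBackOff helper above bounds how long the tests poll the actual state of the world: with Duration 5ms, Factor 3, and Steps 6, the condition is retried after roughly 5, 15, 45, 135, 405, and 1215 ms, about 1.8 s in total. A small stand-alone sketch (plain Go, no Kubernetes imports) that just prints that schedule:

package main

import (
    "fmt"
    "time"
)

// Mirrors the test helper's wait.Backoff{Duration: 5ms, Factor: 3, Jitter: 0,
// Steps: 6}: each retry interval is the previous one multiplied by Factor.
func main() {
    interval := 5 * time.Millisecond
    total := time.Duration(0)
    for step := 1; step <= 6; step++ {
        total += interval
        fmt.Printf("step %d: sleep %v (cumulative %v)\n", step, interval, total)
        interval *= 3
    }
    // Prints 5ms, 15ms, 45ms, 135ms, 405ms, 1.215s; ~1.82s cumulative, which
    // bounds how long waitForAttach/waitForDetach will poll before failing.
}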
pkg/kubelet/volume/volume_manager.go (new file, 364 lines)
@@ -0,0 +1,364 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumemanager

import (
    "fmt"
    "strconv"
    "time"

    "github.com/golang/glog"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/pod"
    "k8s.io/kubernetes/pkg/kubelet/util/format"
    "k8s.io/kubernetes/pkg/kubelet/volume/cache"
    "k8s.io/kubernetes/pkg/kubelet/volume/populator"
    "k8s.io/kubernetes/pkg/kubelet/volume/reconciler"
    "k8s.io/kubernetes/pkg/util/runtime"
    "k8s.io/kubernetes/pkg/util/sets"
    "k8s.io/kubernetes/pkg/util/wait"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
    "k8s.io/kubernetes/pkg/volume/util/types"
    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

const (
    // reconcilerLoopSleepPeriod is the amount of time the reconciler loop waits
    // between successive executions
    reconcilerLoopSleepPeriod time.Duration = 100 * time.Millisecond

    // desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the
    // DesiredStateOfWorldPopulator loop waits between successive executions
    desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 100 * time.Millisecond

    // podAttachAndMountTimeout is the maximum amount of time the
    // GetVolumesForPod call will wait for all volumes in the specified pod to
    // be attached and mounted. Set to 20 minutes because we've seen cloud
    // operations take several minutes to complete for some volume plugins in
    // some cases. While the GetVolumesForPod method is waiting it only blocks
    // other operations on the same pod; other pods are not affected.
    podAttachAndMountTimeout time.Duration = 20 * time.Minute

    // podAttachAndMountRetryInterval is the amount of time the GetVolumesForPod
    // call waits before retrying
    podAttachAndMountRetryInterval time.Duration = 300 * time.Millisecond

    // waitForAttachTimeout is the maximum amount of time a
    // operationexecutor.Mount call will wait for a volume to be attached.
    // Set to 10 minutes because we've seen attach operations take several
    // minutes to complete for some volume plugins in some cases. While this
    // operation is waiting it only blocks other operations on the same device;
    // other devices are not affected.
    waitForAttachTimeout time.Duration = 10 * time.Minute
)

// VolumeManager runs a set of asynchronous loops that figure out which volumes
// need to be attached/mounted/unmounted/detached based on the pods scheduled on
// this node and makes it so.
type VolumeManager interface {
    // Starts the volume manager and all the asynchronous loops that it controls
    Run(stopCh <-chan struct{})

    // WaitForAttachAndMount processes the volumes referenced in the specified
    // pod and blocks until they are all attached and mounted (reflected in
    // actual state of the world).
    // An error is returned if all volumes are not attached and mounted within
    // the duration defined in podAttachAndMountTimeout.
    WaitForAttachAndMount(pod *api.Pod) error

    // GetMountedVolumesForPod returns a VolumeMap containing the volumes
    // referenced by the specified pod that are successfully attached and
    // mounted. The key in the map is the OuterVolumeSpecName (i.e.
    // pod.Spec.Volumes[x].Name). It returns an empty VolumeMap if pod has no
    // volumes.
    GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap

    // GetVolumesForPodAndAppendSupplementalGroups, like GetMountedVolumesForPod,
    // returns a VolumeMap containing the volumes referenced by the specified
    // pod that are successfully attached and mounted. The key in the map is the
    // OuterVolumeSpecName (i.e. pod.Spec.Volumes[x].Name).
    // It returns an empty VolumeMap if pod has no volumes.
    // In addition, for every volume that specifies a VolumeGidValue, it appends
    // the GID to the SecurityContext.SupplementalGroups of the specified pod.
    // XXX: https://github.com/kubernetes/kubernetes/issues/27197 mutating the
    // pod object is bad, and should be avoided.
    GetVolumesForPodAndAppendSupplementalGroups(pod *api.Pod) container.VolumeMap

    // Returns a list of all volumes that are currently attached according to
    // the actual state of the world cache and implement the volume.Attacher
    // interface.
    GetVolumesInUse() []api.UniqueVolumeName
}

// NewVolumeManager returns a new concrete instance implementing the
// VolumeManager interface.
//
// kubeClient - kubeClient is the kube API client used by DesiredStateOfWorldPopulator
// to communicate with the API server to fetch PV and PVC objects
// volumePluginMgr - the volume plugin manager used to access volume plugins.
// Must be pre-initialized.
func NewVolumeManager(
    controllerAttachDetachEnabled bool,
    hostName string,
    podManager pod.Manager,
    kubeClient internalclientset.Interface,
    volumePluginMgr *volume.VolumePluginMgr) (VolumeManager, error) {
    vm := &volumeManager{
        kubeClient:          kubeClient,
        volumePluginMgr:     volumePluginMgr,
        desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr),
        actualStateOfWorld:  cache.NewActualStateOfWorld(hostName, volumePluginMgr),
        operationExecutor:   operationexecutor.NewOperationExecutor(volumePluginMgr),
    }

    vm.reconciler = reconciler.NewReconciler(
        controllerAttachDetachEnabled,
        reconcilerLoopSleepPeriod,
        waitForAttachTimeout,
        hostName,
        vm.desiredStateOfWorld,
        vm.actualStateOfWorld,
        vm.operationExecutor)
    vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
        kubeClient,
        desiredStateOfWorldPopulatorLoopSleepPeriod,
        podManager,
        vm.desiredStateOfWorld)

    return vm, nil
}

// volumeManager implements the VolumeManager interface
type volumeManager struct {
    // kubeClient is the kube API client used by DesiredStateOfWorldPopulator to
    // communicate with the API server to fetch PV and PVC objects
    kubeClient internalclientset.Interface

    // volumePluginMgr is the volume plugin manager used to access volume
    // plugins. It must be pre-initialized.
    volumePluginMgr *volume.VolumePluginMgr

    // desiredStateOfWorld is a data structure containing the desired state of
    // the world according to the volume manager: i.e. what volumes should be
    // attached and which pods are referencing the volumes.
    // The data structure is populated by the desired state of the world
    // populator using the kubelet pod manager.
    desiredStateOfWorld cache.DesiredStateOfWorld

    // actualStateOfWorld is a data structure containing the actual state of
    // the world according to the manager: i.e. which volumes are attached to
    // this node and what pods the volumes are mounted to.
    // The data structure is populated upon successful completion of attach,
    // detach, mount, and unmount actions triggered by the reconciler.
    actualStateOfWorld cache.ActualStateOfWorld

    // operationExecutor is used to start asynchronous attach, detach, mount,
    // and unmount operations.
    operationExecutor operationexecutor.OperationExecutor

    // reconciler runs an asynchronous periodic loop to reconcile the
    // desiredStateOfWorld with the actualStateOfWorld by triggering attach,
    // detach, mount, and unmount operations using the operationExecutor.
    reconciler reconciler.Reconciler

    // desiredStateOfWorldPopulator runs an asynchronous periodic loop to
    // populate the desiredStateOfWorld using the kubelet PodManager.
    desiredStateOfWorldPopulator populator.DesiredStateOfWorldPopulator
}

func (vm *volumeManager) Run(stopCh <-chan struct{}) {
    defer runtime.HandleCrash()
    glog.Infof("Starting Kubelet Volume Manager")

    go vm.reconciler.Run(stopCh)
    go vm.desiredStateOfWorldPopulator.Run(stopCh)

    <-stopCh
    glog.Infof("Shutting down Kubelet Volume Manager")
}

func (vm *volumeManager) GetMountedVolumesForPod(
    podName types.UniquePodName) container.VolumeMap {
    return vm.getVolumesForPodHelper(podName, nil /* pod */)
}

func (vm *volumeManager) GetVolumesForPodAndAppendSupplementalGroups(
    pod *api.Pod) container.VolumeMap {
    return vm.getVolumesForPodHelper("" /* podName */, pod)
}

func (vm *volumeManager) GetVolumesInUse() []api.UniqueVolumeName {
    attachedVolumes := vm.actualStateOfWorld.GetAttachedVolumes()
    volumesInUse :=
        make([]api.UniqueVolumeName, 0 /* len */, len(attachedVolumes) /* cap */)
    for _, attachedVolume := range attachedVolumes {
        if attachedVolume.PluginIsAttachable {
            volumesInUse = append(volumesInUse, attachedVolume.VolumeName)
        }
    }

    return volumesInUse
}

// getVolumesForPodHelper is a helper method that implements the common logic
// for the GetVolumesForPod methods.
// XXX: https://github.com/kubernetes/kubernetes/issues/27197 mutating the pod
// object is bad, and should be avoided.
func (vm *volumeManager) getVolumesForPodHelper(
    podName types.UniquePodName, pod *api.Pod) container.VolumeMap {
    if pod != nil {
        podName = volumehelper.GetUniquePodName(pod)
    }
    podVolumes := make(container.VolumeMap)
    for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
        podVolumes[mountedVolume.OuterVolumeSpecName] =
            container.VolumeInfo{Mounter: mountedVolume.Mounter}
        if pod != nil {
            err := applyPersistentVolumeAnnotations(
                mountedVolume.VolumeGidValue, pod)
            if err != nil {
                glog.Errorf("applyPersistentVolumeAnnotations failed for pod %q volume %q with: %v",
                    podName,
                    mountedVolume.VolumeName,
                    err)
            }
        }
    }
    return podVolumes
}

func (vm *volumeManager) WaitForAttachAndMount(pod *api.Pod) error {
    expectedVolumes := getExpectedVolumes(pod)
    if len(expectedVolumes) == 0 {
        // No volumes to verify
        return nil
    }

    glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
    uniquePodName := volumehelper.GetUniquePodName(pod)

    // Some pods expect to have Setup called over and over again to update.
    // Remount plugins for which this is true. (Atomically updating volumes,
    // like Downward API, depend on this to update the contents of the volume).
    vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
    vm.actualStateOfWorld.MarkRemountRequired(uniquePodName)

    err := wait.Poll(
        podAttachAndMountRetryInterval,
        podAttachAndMountTimeout,
        vm.verifyVolumesMountedFunc(uniquePodName, expectedVolumes))

    if err != nil {
        // Timeout expired
        unmountedVolumes :=
            vm.getUnmountedVolumes(uniquePodName, expectedVolumes)
        if len(unmountedVolumes) == 0 {
            return nil
        }

        return fmt.Errorf(
            "timeout expired waiting for volumes to attach/mount for pod %q/%q. list of unattached/unmounted volumes=%v",
            pod.Name,
            pod.Namespace,
            unmountedVolumes)
    }

    glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
    return nil
}

// verifyVolumesMountedFunc returns a method that returns true when all expected
// volumes are mounted.
func (vm *volumeManager) verifyVolumesMountedFunc(
    podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc {
    return func() (done bool, err error) {
        return len(vm.getUnmountedVolumes(podName, expectedVolumes)) == 0, nil
    }
}

// getUnmountedVolumes fetches the current list of mounted volumes from
// the actual state of the world, and uses it to process the list of
// expectedVolumes. It returns a list of unmounted volumes.
func (vm *volumeManager) getUnmountedVolumes(
    podName types.UniquePodName, expectedVolumes []string) []string {
    mountedVolumes := sets.NewString()
    for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
        mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName)
    }
    return filterUnmountedVolumes(mountedVolumes, expectedVolumes)
}

// filterUnmountedVolumes adds each element of expectedVolumes that is not in
// mountedVolumes to a list of unmountedVolumes and returns it.
func filterUnmountedVolumes(
    mountedVolumes sets.String, expectedVolumes []string) []string {
    unmountedVolumes := []string{}
    for _, expectedVolume := range expectedVolumes {
        if !mountedVolumes.Has(expectedVolume) {
            unmountedVolumes = append(unmountedVolumes, expectedVolume)
        }
    }
    return unmountedVolumes
}

// getExpectedVolumes returns a list of volumes that must be mounted in order to
// consider the volume setup step for this pod satisfied.
func getExpectedVolumes(pod *api.Pod) []string {
    expectedVolumes := []string{}
    if pod == nil {
        return expectedVolumes
    }

    for _, podVolume := range pod.Spec.Volumes {
        expectedVolumes = append(expectedVolumes, podVolume.Name)
    }

    return expectedVolumes
}

// applyPersistentVolumeAnnotations appends to the pod's
// SecurityContext.SupplementalGroups if a GID annotation is provided.
// XXX: https://github.com/kubernetes/kubernetes/issues/27197 mutating the pod
// object is bad, and should be avoided.
func applyPersistentVolumeAnnotations(
    volumeGidValue string, pod *api.Pod) error {
    if volumeGidValue != "" {
        gid, err := strconv.ParseInt(volumeGidValue, 10, 64)
        if err != nil {
            return fmt.Errorf(
                "Invalid value for %s %v",
                volumehelper.VolumeGidAnnotationKey,
                err)
        }

        if pod.Spec.SecurityContext == nil {
            pod.Spec.SecurityContext = &api.PodSecurityContext{}
        }
        for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
            if gid == existingGid {
                return nil
            }
        }
        pod.Spec.SecurityContext.SupplementalGroups =
            append(pod.Spec.SecurityContext.SupplementalGroups, gid)
    }

    return nil
}
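WaitForAttachAndMount above is built on wait.Poll: re-evaluate a condition at a fixed interval (podAttachAndMountRetryInterval, 300 ms) until it holds or the timeout (podAttachAndMountTimeout, 20 min) expires. A minimal stand-alone sketch of that polling shape, with shortened intervals and a fake condition (the poll function here is a hand-rolled stand-in for illustration, not the real wait.Poll):

package main

import (
    "errors"
    "fmt"
    "time"
)

// poll re-checks condition every interval until it succeeds or the timeout
// expires, mirroring the shape of the wait.Poll call above.
func poll(interval, timeout time.Duration, condition func() bool) error {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if condition() {
            return nil
        }
        time.Sleep(interval)
    }
    return errors.New("timed out waiting for the condition")
}

func main() {
    // Pretend the mount completes 50ms from now.
    mountedAt := time.Now().Add(50 * time.Millisecond)
    err := poll(10*time.Millisecond, time.Second, func() bool {
        return time.Now().After(mountedAt) // stands in for "all expected volumes mounted"
    })
    fmt.Println("err:", err) // err: <nil>
}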
pkg/kubelet/volume_host.go (new file, 135 lines)
@@ -0,0 +1,135 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "fmt"
    "net"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    "k8s.io/kubernetes/pkg/cloudprovider"
    "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/util/io"
    "k8s.io/kubernetes/pkg/util/mount"
    "k8s.io/kubernetes/pkg/volume"
)

// NewInitializedVolumePluginMgr returns a new instance of
// volume.VolumePluginMgr initialized with the kubelet's implementation of the
// volume.VolumeHost interface.
//
// kubelet - used by VolumeHost methods to expose kubelet specific parameters
// plugins - used to initialize volumePluginMgr
func NewInitializedVolumePluginMgr(
    kubelet *Kubelet,
    plugins []volume.VolumePlugin) (*volume.VolumePluginMgr, error) {
    kvh := &kubeletVolumeHost{
        kubelet:         kubelet,
        volumePluginMgr: volume.VolumePluginMgr{},
    }

    if err := kvh.volumePluginMgr.InitPlugins(plugins, kvh); err != nil {
        return nil, fmt.Errorf(
            "Could not initialize volume plugins for KubeletVolumePluginMgr: %v",
            err)
    }

    return &kvh.volumePluginMgr, nil
}

// Compile-time check to ensure kubeletVolumeHost implements the VolumeHost interface
var _ volume.VolumeHost = &kubeletVolumeHost{}

func (kvh *kubeletVolumeHost) GetPluginDir(pluginName string) string {
    return kvh.kubelet.getPluginDir(pluginName)
}

type kubeletVolumeHost struct {
    kubelet         *Kubelet
    volumePluginMgr volume.VolumePluginMgr
}

func (kvh *kubeletVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
    return kvh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
}

func (kvh *kubeletVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
    return kvh.kubelet.getPodPluginDir(podUID, pluginName)
}

func (kvh *kubeletVolumeHost) GetKubeClient() internalclientset.Interface {
    return kvh.kubelet.kubeClient
}

func (kvh *kubeletVolumeHost) NewWrapperMounter(
    volName string,
    spec volume.Spec,
    pod *api.Pod,
    opts volume.VolumeOptions) (volume.Mounter, error) {
    // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
    wrapperVolumeName := "wrapped_" + volName
    if spec.Volume != nil {
        spec.Volume.Name = wrapperVolumeName
    }

    return kvh.kubelet.newVolumeMounterFromPlugins(&spec, pod, opts)
}

func (kvh *kubeletVolumeHost) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
    // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
    wrapperVolumeName := "wrapped_" + volName
    if spec.Volume != nil {
        spec.Volume.Name = wrapperVolumeName
    }

    plugin, err := kvh.kubelet.volumePluginMgr.FindPluginBySpec(&spec)
    if err != nil {
        return nil, err
    }

    return plugin.NewUnmounter(spec.Name(), podUID)
}

func (kvh *kubeletVolumeHost) GetCloudProvider() cloudprovider.Interface {
    return kvh.kubelet.cloud
}

func (kvh *kubeletVolumeHost) GetMounter() mount.Interface {
    return kvh.kubelet.mounter
}

func (kvh *kubeletVolumeHost) GetWriter() io.Writer {
    return kvh.kubelet.writer
}

func (kvh *kubeletVolumeHost) GetHostName() string {
    return kvh.kubelet.hostname
}

func (kvh *kubeletVolumeHost) GetHostIP() (net.IP, error) {
    return kvh.kubelet.GetHostIP()
}

func (kvh *kubeletVolumeHost) GetRootContext() string {
    rootContext, err := kvh.kubelet.getRootDirContext()
    if err != nil {
        return ""
    }

    return rootContext
}
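Both wrapper constructors above rename the inner volume by prefixing "wrapped_", presumably so the directory created for a volume that is itself backed by another plugin (for example, an atomically-updated volume delegating to an EmptyDir) cannot collide with one of the pod's own volume names. A trivial sketch of the convention (wrapperVolumeName as a free function is illustrative only):

package main

import "fmt"

// wrapperVolumeName applies the "wrapped_{wrapped_volume_name}" convention
// used by NewWrapperMounter/NewWrapperUnmounter.
func wrapperVolumeName(volName string) string {
    return "wrapped_" + volName
}

func main() {
    fmt.Println(wrapperVolumeName("token-secret")) // wrapped_token-secret
}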
@@ -1,103 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
    "sync"

    "k8s.io/kubernetes/pkg/api"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/types"
)

// volumeManager manages the volumes for the pods running on the kubelet.
// Currently it only does bookkeeping, but it can be expanded to
// take care of the volumePlugins.
// TODO(saad-ali): note that volumeManager will be completely refactored as part
// of mount/unmount refactor.
type volumeManager struct {
    lock         sync.RWMutex
    volumeMaps   map[types.UID]kubecontainer.VolumeMap
    volumesInUse []api.UniqueVolumeName
}

func newVolumeManager() *volumeManager {
    vm := &volumeManager{
        volumeMaps:   make(map[types.UID]kubecontainer.VolumeMap),
        volumesInUse: []api.UniqueVolumeName{},
    }
    return vm
}

// SetVolumes sets the volume map for a pod.
// TODO(yifan): Currently we assume the volume is already mounted, so we only do bookkeeping here.
func (vm *volumeManager) SetVolumes(podUID types.UID, podVolumes kubecontainer.VolumeMap) {
    vm.lock.Lock()
    defer vm.lock.Unlock()
    vm.volumeMaps[podUID] = podVolumes
}

// GetVolumes returns the volume map of volumes already mounted on the host machine
// for a pod.
func (vm *volumeManager) GetVolumes(podUID types.UID) (kubecontainer.VolumeMap, bool) {
    vm.lock.RLock()
    defer vm.lock.RUnlock()
    vol, ok := vm.volumeMaps[podUID]
    return vol, ok
}

// DeleteVolumes removes the reference to a volume map for a pod.
func (vm *volumeManager) DeleteVolumes(podUID types.UID) {
    vm.lock.Lock()
    defer vm.lock.Unlock()
    delete(vm.volumeMaps, podUID)
}

// AddVolumeInUse adds the specified volume to the volumesInUse list, if it doesn't
// already exist
func (vm *volumeManager) AddVolumeInUse(uniqueDeviceName api.UniqueVolumeName) {
    vm.lock.Lock()
    defer vm.lock.Unlock()
    for _, volume := range vm.volumesInUse {
        if volume == uniqueDeviceName {
            // Volume already exists in list
            return
        }
    }

    vm.volumesInUse = append(vm.volumesInUse, uniqueDeviceName)
}

// RemoveVolumeInUse removes the specified volume from the volumesInUse list, if it
// exists
func (vm *volumeManager) RemoveVolumeInUse(uniqueDeviceName api.UniqueVolumeName) {
    vm.lock.Lock()
    defer vm.lock.Unlock()
    for i := len(vm.volumesInUse) - 1; i >= 0; i-- {
        if vm.volumesInUse[i] == uniqueDeviceName {
            // Volume exists, remove it
            vm.volumesInUse = append(vm.volumesInUse[:i], vm.volumesInUse[i+1:]...)
        }
    }
}

// GetVolumesInUse returns the volumesInUse list
func (vm *volumeManager) GetVolumesInUse() []api.UniqueVolumeName {
    vm.lock.RLock()
    defer vm.lock.RUnlock()
    return vm.volumesInUse
}
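The deleted RemoveVolumeInUse above iterates the slice in reverse so that each append-based removal only shifts elements that have already been visited, keeping the loop index valid even when several entries match. The same idiom in isolation (removeAll is a hypothetical helper for illustration):

package main

import "fmt"

// removeAll deletes every occurrence of target from s using the
// reverse-iteration idiom from RemoveVolumeInUse.
func removeAll(s []string, target string) []string {
    for i := len(s) - 1; i >= 0; i-- {
        if s[i] == target {
            s = append(s[:i], s[i+1:]...)
        }
    }
    return s
}

func main() {
    fmt.Println(removeAll([]string{"a", "b", "a", "c"}, "a")) // [b c]
}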
@@ -1,449 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright 2014 The Kubernetes Authors All rights reserved.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package kubelet
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/golang/glog"
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
|
||||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
|
||||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
|
||||||
"k8s.io/kubernetes/pkg/types"
|
|
||||||
"k8s.io/kubernetes/pkg/util"
|
|
||||||
"k8s.io/kubernetes/pkg/util/io"
|
|
||||||
"k8s.io/kubernetes/pkg/util/mount"
|
|
||||||
"k8s.io/kubernetes/pkg/util/strings"
|
|
||||||
"k8s.io/kubernetes/pkg/volume"
|
|
||||||
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
volumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// This just exports required functions from kubelet proper, for use by volume
|
|
||||||
// plugins.
|
|
||||||
type volumeHost struct {
|
|
||||||
kubelet *Kubelet
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vh *volumeHost) GetPluginDir(pluginName string) string {
|
|
||||||
return vh.kubelet.getPluginDir(pluginName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vh *volumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
|
|
||||||
return vh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vh *volumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
|
|
||||||
return vh.kubelet.getPodPluginDir(podUID, pluginName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vh *volumeHost) GetKubeClient() clientset.Interface {
|
|
||||||
return vh.kubelet.kubeClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWrapperMounter attempts to create a volume mounter
|
|
||||||
// from a volume Spec, pod and volume options.
|
|
||||||
// Returns a new volume Mounter or an error.
|
|
||||||
func (vh *volumeHost) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
|
|
||||||
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
|
|
||||||
wrapperVolumeName := "wrapped_" + volName
|
|
||||||
if spec.Volume != nil {
|
|
||||||
spec.Volume.Name = wrapperVolumeName
|
|
||||||
}
|
|
||||||
|
|
||||||
return vh.kubelet.newVolumeMounterFromPlugins(&spec, pod, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWrapperUnmounter attempts to create a volume unmounter
|
|
||||||
// from a volume name and pod uid.
|
|
||||||
// Returns a new volume Unmounter or an error.
|
|
||||||
func (vh *volumeHost) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
|
|
||||||
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
|
|
||||||
wrapperVolumeName := "wrapped_" + volName
|
|
||||||
if spec.Volume != nil {
|
|
||||||
spec.Volume.Name = wrapperVolumeName
|
|
||||||
}
|
|
||||||
|
|
||||||
plugin, err := vh.kubelet.volumePluginMgr.FindPluginBySpec(&spec)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return plugin.NewUnmounter(spec.Name(), podUID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vh *volumeHost) GetCloudProvider() cloudprovider.Interface {
|
|
||||||
return vh.kubelet.cloud
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vh *volumeHost) GetMounter() mount.Interface {
|
|
||||||
return vh.kubelet.mounter
|
|
||||||
}
|
|
||||||
|
|
||||||
func (vh *volumeHost) GetWriter() io.Writer {
|
|
||||||
return vh.kubelet.writer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the hostname of the host kubelet is running on
|
|
||||||
func (vh *volumeHost) GetHostName() string {
|
|
||||||
return vh.kubelet.hostname
|
|
||||||
}
|
|
||||||
|
|
||||||
// mountExternalVolumes mounts the volumes declared in a pod, attaching them
|
|
||||||
// to the host if necessary, and returns a map containing information about
|
|
||||||
// the volumes for the pod or an error. This method is run multiple times,
|
|
||||||
// and requires that implementations of Attach() and SetUp() be idempotent.
|
|
||||||
//
|
|
||||||
// Note, in the future, the attach-detach controller will handle attaching and
|
|
||||||
// detaching volumes; this call site will be maintained for backward-
|
|
||||||
// compatibility with current behavior of static pods and pods created via the
|
|
||||||
// Kubelet's http API.
|
|
||||||
func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (kubecontainer.VolumeMap, error) {
|
|
||||||
podVolumes := make(kubecontainer.VolumeMap)
|
|
||||||
for i := range pod.Spec.Volumes {
|
|
||||||
var fsGroup *int64
|
|
||||||
if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.FSGroup != nil {
|
|
||||||
fsGroup = pod.Spec.SecurityContext.FSGroup
|
|
||||||
}
|
|
||||||
|
|
||||||
rootContext, err := kl.getRootDirContext()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var volSpec *volume.Spec
|
|
||||||
if pod.Spec.Volumes[i].VolumeSource.PersistentVolumeClaim != nil {
|
|
||||||
claimName := pod.Spec.Volumes[i].PersistentVolumeClaim.ClaimName
|
|
||||||
pv, err := kl.getPersistentVolumeByClaimName(claimName, pod.Namespace)
|
|
||||||
if err != nil {
|
|
||||||
glog.Errorf("Could not find persistentVolume for claim %s err %v", claimName, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
kl.applyPersistentVolumeAnnotations(pv, pod)
|
|
||||||
volSpec = volume.NewSpecFromPersistentVolume(pv, pod.Spec.Volumes[i].PersistentVolumeClaim.ReadOnly)
|
|
||||||
} else {
|
|
||||||
volSpec = volume.NewSpecFromVolume(&pod.Spec.Volumes[i])
|
|
||||||
}
|
|
||||||
// Try to use a plugin for this volume.
|
|
||||||
mounter, err := kl.newVolumeMounterFromPlugins(volSpec, pod, volume.VolumeOptions{RootContext: rootContext})
|
|
||||||
if err != nil {
|
|
||||||
glog.Errorf("Could not create volume mounter for pod %s: %v", pod.UID, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// some volumes require attachment before mounter's setup.
|
|
||||||
// The plugin can be nil, but non-nil errors are legitimate errors.
|
|
||||||
// For non-nil plugins, Attachment to a node is required before Mounter's setup.
|
|
||||||
attacher, attachablePlugin, err := kl.newVolumeAttacherFromPlugins(volSpec, pod)
|
|
||||||
if err != nil {
|
|
||||||
glog.Errorf("Could not create volume attacher for pod %s: %v", pod.UID, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if attacher != nil {
|
|
||||||
// If the device path is already mounted, avoid an expensive call to the
|
|
||||||
// cloud provider.
|
|
||||||
deviceMountPath := attacher.GetDeviceMountPath(volSpec)
|
|
||||||
notMountPoint, err := kl.mounter.IsLikelyNotMountPoint(deviceMountPath)
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if notMountPoint {
|
|
||||||
if !kl.enableControllerAttachDetach {
|
|
||||||
err = attacher.Attach(volSpec, kl.hostname)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
devicePath, err := attacher.WaitForAttach(volSpec, maxWaitForVolumeOps)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if kl.enableControllerAttachDetach {
|
|
||||||
// Attach/Detach controller is enabled and this volume type
|
|
||||||
// implements an attacher
|
|
||||||
uniqueDeviceName, err := volumehelper.GetUniqueVolumeNameFromSpec(
|
|
||||||
attachablePlugin, volSpec)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
kl.volumeManager.AddVolumeInUse(
|
|
||||||
api.UniqueVolumeName(uniqueDeviceName))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err = attacher.MountDevice(volSpec, devicePath, deviceMountPath, kl.mounter); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err = mounter.SetUp(fsGroup)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
podVolumes[pod.Spec.Volumes[i].Name] = kubecontainer.VolumeInfo{Mounter: mounter}
|
|
||||||
}
|
|
||||||
return podVolumes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type volumeTuple struct {
|
|
||||||
Kind string
|
|
||||||
Name string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListVolumesForPod returns a map of the volumes associated with the given pod
|
|
||||||
func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume, bool) {
|
|
||||||
result := map[string]volume.Volume{}
|
|
||||||
vm, ok := kl.volumeManager.GetVolumes(podUID)
|
|
||||||
if !ok {
|
|
||||||
return result, false
|
|
||||||
}
|
|
||||||
for name, info := range vm {
|
|
||||||
result[name] = info.Mounter
|
|
||||||
}
|
|
||||||
return result, true
|
|
||||||
}
|
|
||||||
|
|
||||||
// getPodVolumes examines the directory structure for a pod and returns
// information about the name and kind of each presently mounted volume, or an
// error.
func (kl *Kubelet) getPodVolumes(podUID types.UID) ([]*volumeTuple, error) {
	var volumes []*volumeTuple
	podVolDir := kl.getPodVolumesDir(podUID)
	volumeKindDirs, err := ioutil.ReadDir(podVolDir)
	if err != nil {
		glog.Errorf("Could not read directory %s: %v", podVolDir, err)
	}
	for _, volumeKindDir := range volumeKindDirs {
		volumeKind := volumeKindDir.Name()
		volumeKindPath := path.Join(podVolDir, volumeKind)
		// ioutil.ReadDir exits without returning any healthy dir when it encounters
		// the first lstat error, but skipping dirs means no cleanup for healthy
		// volumes. Switching to a no-exit API solves this problem.
		volumeNameDirs, volumeNameDirsStat, err := util.ReadDirNoExit(volumeKindPath)
		if err != nil {
			return []*volumeTuple{}, fmt.Errorf("could not read directory %s: %v", volumeKindPath, err)
		}
		for i, volumeNameDir := range volumeNameDirs {
			if volumeNameDir != nil {
				volumes = append(volumes, &volumeTuple{Kind: volumeKind, Name: volumeNameDir.Name()})
			} else {
				glog.Errorf("Could not read directory %s: %v", podVolDir, volumeNameDirsStat[i])
			}
		}
	}
	return volumes, nil
}

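For orientation, getPodVolumes walks the kubelet's on-disk layout for one pod. A sketch of the path shape it iterates over, using the escaped plugin name as the "kind" directory (the concrete example matches the empty-dir test fixtures later in this patch; podVolumePath is illustrative only):

// Sketch only: the directory shape behind kl.getPodVolumesDir(podUID).
//
//	<kubelet root>/pods/<pod UID>/volumes/<escaped plugin name>/<volume name>
//	e.g. /var/lib/kubelet/pods/poduid/volumes/kubernetes.io~empty-dir/test-volume
func podVolumePath(root string, podUID types.UID, escapedPlugin, volName string) string {
	return path.Join(root, "pods", string(podUID), "volumes", escapedPlugin, volName)
}
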
// cleaner is a union struct that allows unmounting to be separated from detaching.
// Some volumes require detachment but not all. Unmounter cannot be nil but Detacher is optional.
type cleaner struct {
	PluginName string
	Unmounter  volume.Unmounter
	Detacher   *volume.Detacher
}

// getPodVolumesFromDisk examines directory structure to determine volumes that
// are presently active and mounted. Returns a union struct containing a volume.Unmounter
// and potentially a volume.Detacher.
func (kl *Kubelet) getPodVolumesFromDisk() map[string]cleaner {
	currentVolumes := make(map[string]cleaner)
	podUIDs, err := kl.listPodsFromDisk()
	if err != nil {
		glog.Errorf("Could not get pods from disk: %v", err)
		return map[string]cleaner{}
	}
	// Find the volumes for each on-disk pod.
	for _, podUID := range podUIDs {
		volumes, err := kl.getPodVolumes(podUID)
		if err != nil {
			glog.Errorf("%v", err)
			continue
		}
		for _, volume := range volumes {
			identifier := fmt.Sprintf("%s/%s", podUID, volume.Name)
			glog.V(5).Infof("Making a volume.Unmounter for volume %s/%s of pod %s", volume.Kind, volume.Name, podUID)
			// TODO(thockin) This should instead return a reference to an extant
			// volume object, except that we don't actually hold on to pod specs
			// or volume objects.

			// Try to use a plugin for this volume.
			unmounter, pluginName, err := kl.newVolumeUnmounterFromPlugins(volume.Kind, volume.Name, podUID)
			if err != nil {
				glog.Errorf("Could not create volume unmounter for %s: %v", volume.Name, err)
				continue
			}

			tuple := cleaner{PluginName: pluginName, Unmounter: unmounter}
			detacher, err := kl.newVolumeDetacherFromPlugins(volume.Kind, volume.Name, podUID)
			// plugin can be nil but a non-nil error is a legitimate error
			if err != nil {
				glog.Errorf("Could not create volume detacher for %s: %v", volume.Name, err)
				continue
			}
			if detacher != nil {
				tuple.Detacher = &detacher
			}
			currentVolumes[identifier] = tuple
		}
	}
	return currentVolumes
}

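A hedged sketch of how the map returned by getPodVolumesFromDisk could be consumed by cleanup code: the Unmounter is always present, while detach only applies when the plugin supplied a Detacher (the detach call itself needs a device path and host name, which this sketch elides; teardownOrphanedVolumes is illustrative, not part of the source):

// Sketch only -- not part of the original source.
func teardownOrphanedVolumes(found map[string]cleaner) {
	for id, c := range found {
		if err := c.Unmounter.TearDown(); err != nil {
			glog.Errorf("Could not tear down volume %q: %v", id, err)
			continue
		}
		if c.Detacher != nil {
			// Only attachable plugins reach here; a real caller would now
			// invoke the Detacher with the device path and host name.
			glog.V(5).Infof("Volume %q (plugin %q) also needs detach", id, c.PluginName)
		}
	}
}
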
func (kl *Kubelet) getPersistentVolumeByClaimName(claimName string, namespace string) (*api.PersistentVolume, error) {
	claim, err := kl.kubeClient.Core().PersistentVolumeClaims(namespace).Get(claimName)
	if err != nil {
		glog.Errorf("Error finding claim: %+v\n", claimName)
		return nil, err
	}
	glog.V(5).Infof("Found claim %v ", claim)

	if claim.Spec.VolumeName == "" {
		return nil, fmt.Errorf("The claim %+v is not yet bound to a volume", claimName)
	}

	pv, err := kl.kubeClient.Core().PersistentVolumes().Get(claim.Spec.VolumeName)
	if err != nil {
		glog.Errorf("Error finding persistent volume for claim: %+v\n", claimName)
		return nil, err
	}

	if pv.Spec.ClaimRef == nil {
		return nil, fmt.Errorf("The volume is not yet bound to the claim. Expected to find the bind on volume.Spec.ClaimRef: %+v", pv)
	}

	if pv.Spec.ClaimRef.UID != claim.UID {
		return nil, fmt.Errorf("Expected volume.Spec.ClaimRef.UID %+v but have %+v", pv.Spec.ClaimRef.UID, claim.UID)
	}

	return pv, nil
}

func (kl *Kubelet) applyPersistentVolumeAnnotations(pv *api.PersistentVolume, pod *api.Pod) error {
	// If a GID annotation is provided set the GID attribute.
	if volumeGid, ok := pv.Annotations[volumeGidAnnotationKey]; ok {
		gid, err := strconv.ParseInt(volumeGid, 10, 64)
		if err != nil {
			return fmt.Errorf("Invalid value for %s %v", volumeGidAnnotationKey, err)
		}

		if pod.Spec.SecurityContext == nil {
			pod.Spec.SecurityContext = &api.PodSecurityContext{}
		}
		for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
			if gid == existingGid {
				return nil
			}
		}
		pod.Spec.SecurityContext.SupplementalGroups = append(pod.Spec.SecurityContext.SupplementalGroups, gid)
	}

	return nil
}

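A concrete value makes the annotation handling above easier to follow. A sketch, assuming volumeGidAnnotationKey is the constant consulted above (its value is defined elsewhere in this package; applyGidExample is illustrative only):

// Sketch only: a PV annotated with a group ID, and the observable effect.
func applyGidExample(kl *Kubelet) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Annotations: map[string]string{volumeGidAnnotationKey: "1234"},
		},
	}
	pod := &api.Pod{}
	if err := kl.applyPersistentVolumeAnnotations(pv, pod); err == nil {
		// pod.Spec.SecurityContext.SupplementalGroups now contains 1234;
		// a second call with the same PV would not append a duplicate.
	}
}
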
// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
// and volume options and then creates a Mounter.
// Returns a valid Mounter or an error.
func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
	}
	physicalMounter, err := plugin.NewMounter(spec, pod, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate mounter for volume: %s using plugin: %s with a root cause: %v", spec.Name(), plugin.GetPluginName(), err)
	}
	glog.V(10).Infof("Using volume plugin %q to mount %s", plugin.GetPluginName(), spec.Name())
	return physicalMounter, nil
}

// newVolumeAttacherFromPlugins attempts to find a plugin from a volume spec
// and then create an Attacher.
// Returns:
// - an attacher if one exists, nil otherwise
// - the AttachableVolumePlugin if attacher exists, nil otherwise
// - an error if no plugin was found for the volume
//   or the attacher failed to instantiate, nil otherwise
func (kl *Kubelet) newVolumeAttacherFromPlugins(spec *volume.Spec, pod *api.Pod) (volume.Attacher, volume.AttachableVolumePlugin, error) {
	plugin, err := kl.volumePluginMgr.FindAttachablePluginBySpec(spec)
	if err != nil {
		return nil, nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
	}
	if plugin == nil {
		// Not found but not an error.
		return nil, nil, nil
	}

	attacher, err := plugin.NewAttacher()
	if err != nil {
		return nil, nil, fmt.Errorf("failed to instantiate volume attacher for %s: %v", spec.Name(), err)
	}
	glog.V(3).Infof("Using volume plugin %q to attach %s", plugin.GetPluginName(), spec.Name())
	return attacher, plugin, nil
}

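The attacher factory above (and the detacher factory below) deliberately distinguishes "no plugin" from "failure": a nil Attacher with a nil error just means the volume type has no attach step. A sketch of the contract a caller must honor (a fragment, assuming volSpec and pod in scope as in the mount loop earlier):

// Sketch only: the three-way result contract of newVolumeAttacherFromPlugins.
attacher, attachablePlugin, err := kl.newVolumeAttacherFromPlugins(volSpec, pod)
switch {
case err != nil:
	// Real failure: plugin lookup or attacher construction broke.
case attacher == nil:
	// Not an error: this volume type simply is not attachable
	// (e.g. emptyDir); skip straight to mounting.
default:
	// Attachable: attachablePlugin is non-nil and is used to derive the
	// unique device name reported via volumeManager.AddVolumeInUse.
	_ = attachablePlugin
}
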
// newVolumeUnmounterFromPlugins attempts to find a plugin by name and then
// create an Unmounter.
// Returns a valid Unmounter or an error.
func (kl *Kubelet) newVolumeUnmounterFromPlugins(kind string, name string, podUID types.UID) (volume.Unmounter, string, error) {
	plugName := strings.UnescapeQualifiedNameForDisk(kind)
	plugin, err := kl.volumePluginMgr.FindPluginByName(plugName)
	if err != nil {
		// TODO: Maybe we should launch a cleanup of this dir?
		return nil, "", fmt.Errorf("can't use volume plugins for %s/%s: %v", podUID, kind, err)
	}

	unmounter, err := plugin.NewUnmounter(name, podUID)
	if err != nil {
		return nil, "", fmt.Errorf("failed to instantiate volume plugin for %s/%s: %v", podUID, kind, err)
	}
	glog.V(5).Infof("Using volume plugin %q to unmount %s/%s", plugin.GetPluginName(), podUID, kind)
	return unmounter, plugin.GetPluginName(), nil
}

// newVolumeDetacherFromPlugins attempts to find a plugin by a name and then
// create a Detacher.
// Returns:
// - a detacher if one exists
// - an error if no plugin was found for the volume
//   or the detacher failed to instantiate
// - nil if there is no appropriate detacher for this volume
func (kl *Kubelet) newVolumeDetacherFromPlugins(kind string, name string, podUID types.UID) (volume.Detacher, error) {
	plugName := strings.UnescapeQualifiedNameForDisk(kind)
	plugin, err := kl.volumePluginMgr.FindAttachablePluginByName(plugName)
	if err != nil {
		return nil, fmt.Errorf("can't use volume plugins for %s/%s: %v", podUID, kind, err)
	}
	if plugin == nil {
		// Not found but not an error.
		return nil, nil
	}

	detacher, err := plugin.NewDetacher()
	if err != nil {
		return nil, fmt.Errorf("failed to instantiate volume plugin for %s/%s: %v", podUID, kind, err)
	}
	return detacher, nil
}
@@ -16,10 +16,6 @@ limitations under the License.

 package types

-// UniquePodName is an identifier that can be used to uniquely identify a pod
-// within the cluster.
-type UniquePodName string
-
 // NamespacedName comprises a resource name, with a mandatory namespace,
 // rendered as "<namespace>/<name>". Being a type captures intent and
 // helps make sure that UIDs, namespaced names and non-namespaced names
@@ -37,8 +33,3 @@ type NamespacedName struct {
 func (n NamespacedName) String() string {
 	return n.Namespace + "/" + n.Name
 }
-
-// UniquePodName returns the UniquePodName object representation
-func (n NamespacedName) UniquePodName() UniquePodName {
-	return UniquePodName(n.String())
-}
@@ -24,7 +24,6 @@ import (
 	"time"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
@@ -42,17 +41,12 @@ func (plugin *awsElasticBlockStorePlugin) NewAttacher() (volume.Attacher, error)
 	return &awsElasticBlockStoreAttacher{host: plugin.host}, nil
 }

-func (plugin *awsElasticBlockStorePlugin) GetDeviceName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference an EBS volume type")
-	}
-
-	return volumeSource.VolumeID, nil
-}
-
 func (attacher *awsElasticBlockStoreAttacher) Attach(spec *volume.Spec, hostName string) error {
-	volumeSource, readOnly := getVolumeSource(spec)
+	volumeSource, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return err
+	}
+
 	volumeID := volumeSource.VolumeID

 	awsCloud, err := getCloudProvider(attacher.host.GetCloudProvider())
@@ -86,7 +80,12 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, t
 	if err != nil {
 		return "", err
 	}
-	volumeSource, _ := getVolumeSource(spec)
+
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
+	}
+
 	volumeID := volumeSource.VolumeID
 	partition := ""
 	if volumeSource.Partition != 0 {
@@ -136,13 +135,19 @@ func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, t
 		}
 	}

-func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(spec *volume.Spec) string {
-	volumeSource, _ := getVolumeSource(spec)
-	return makeGlobalPDPath(attacher.host, volumeSource.VolumeID)
+func (attacher *awsElasticBlockStoreAttacher) GetDeviceMountPath(
+	spec *volume.Spec) (string, error) {
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
+	}
+
+	return makeGlobalPDPath(attacher.host, volumeSource.VolumeID), nil
 }

 // FIXME: this method can be further pruned.
-func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
+func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+	mounter := attacher.host.GetMounter()
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -155,7 +160,10 @@ func (attacher *awsElasticBlockStoreAttacher) MountDevice(spec *volume.Spec, dev
 		}
 	}

-	volumeSource, readOnly := getVolumeSource(spec)
+	volumeSource, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return err
+	}

 	options := []string{}
 	if readOnly {
@@ -231,7 +239,8 @@ func (detacher *awsElasticBlockStoreDetacher) WaitForDetach(devicePath string, t
 	}
 }

-func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string, mounter mount.Interface) error {
+func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath string) error {
+	mounter := detacher.host.GetMounter()
 	volume := path.Base(deviceMountPath)
 	if err := unmountPDAndRemoveGlobalPath(deviceMountPath, mounter); err != nil {
 		glog.Errorf("Error unmounting %q: %v", volume, err)
@@ -239,18 +248,3 @@ func (detacher *awsElasticBlockStoreDetacher) UnmountDevice(deviceMountPath stri

 	return nil
 }
-
-func getVolumeSource(spec *volume.Spec) (*api.AWSElasticBlockStoreVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.AWSElasticBlockStoreVolumeSource
-
-	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
-		volumeSource = spec.Volume.AWSElasticBlockStore
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.AWSElasticBlockStore
-		readOnly = spec.ReadOnly
-	}
-
-	return volumeSource, readOnly
-}
@@ -66,9 +66,9 @@ func (plugin *awsElasticBlockStorePlugin) GetPluginName() string {
 }

 func (plugin *awsElasticBlockStorePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference an AWS EBS volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}

 	return volumeSource.VolumeID, nil
@@ -79,6 +79,10 @@ func (plugin *awsElasticBlockStorePlugin) CanSupport(spec *volume.Spec) bool {
 		(spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil)
 }

+func (plugin *awsElasticBlockStorePlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
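RequiresRemount is added to every plugin touched by this commit: device-backed plugins such as aws-ebs return false, while the projected-content plugins later in the patch (configmap, downwardAPI) return true so their contents get refreshed on resync. A sketch of a hypothetical consumer, hedged since the actual caller lives in the new volume manager rather than in this hunk:

// Sketch only: how a sync loop could use the new method.
if plugin.RequiresRemount() {
	// Projected volumes (configmap, downward API) must be re-run through
	// SetUp periodically so updated content reaches the pod.
	if err := mounter.SetUp(fsGroup); err != nil {
		return err
	}
}
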
@@ -93,14 +97,9 @@ func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *api
 func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
 	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
 	// EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
-	var readOnly bool
-	var ebs *api.AWSElasticBlockStoreVolumeSource
-	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
-		ebs = spec.Volume.AWSElasticBlockStore
-		readOnly = ebs.ReadOnly
-	} else {
-		ebs = spec.PersistentVolume.Spec.AWSElasticBlockStore
-		readOnly = spec.ReadOnly
+	ebs, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
 	}

 	volumeID := ebs.VolumeID
@@ -176,19 +175,16 @@ func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.
 	}, nil
 }

-func getVolumeSource(spec *volume.Spec) (*api.AWSElasticBlockStoreVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.AWSElasticBlockStoreVolumeSource
-
+func getVolumeSource(
+	spec *volume.Spec) (*api.AWSElasticBlockStoreVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.AWSElasticBlockStore != nil {
-		volumeSource = spec.Volume.AWSElasticBlockStore
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.AWSElasticBlockStore
-		readOnly = spec.ReadOnly
+		return spec.Volume.AWSElasticBlockStore, spec.Volume.AWSElasticBlockStore.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.AWSElasticBlockStore != nil {
+		return spec.PersistentVolume.Spec.AWSElasticBlockStore, spec.ReadOnly, nil
 	}

-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference an AWS EBS volume type")
 }

 // Abstract interface to PD operations.
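The getVolumeSource rewrite above is the template for the azure_file, cephfs, and cinder changes that follow: the old two-value form returned a possibly-nil source that each caller had to remember to nil-check, while the three-value form folds validation into an error and also guards the previously unchecked spec.PersistentVolume dereference. A sketch of the calling pattern it enables (a fragment, not a complete function):

// Before: caller-side nil check, and a panic risk on a non-EBS spec.
//	volumeSource, _ := getVolumeSource(spec)
//	if volumeSource == nil { ... }
// After: validation is owned by the helper.
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
	return err // e.g. "Spec does not reference an AWS EBS volume type"
}
// volumeSource is guaranteed non-nil here; readOnly already reflects
// whether the source came from a pod volume or from a PV claim.
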
@@ -39,14 +39,14 @@ func TestCanSupport(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/aws-ebs" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/aws-ebs" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -63,7 +63,7 @@ func TestGetAccessModes(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/aws-ebs")
 	if err != nil {
@@ -112,7 +112,7 @@ func TestPlugin(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
 	if err != nil {
@@ -254,7 +254,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, clientset, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)

 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -274,7 +274,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
 	if err != nil {
@@ -59,9 +59,9 @@ func (plugin *azureFilePlugin) GetPluginName() string {
 }

 func (plugin *azureFilePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference an AzureFile volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}

 	return volumeSource.ShareName, nil
@@ -73,6 +73,10 @@ func (plugin *azureFilePlugin) CanSupport(spec *volume.Spec) bool {
 		(spec.Volume != nil && spec.Volume.AzureFile != nil)
 }

+func (plugin *azureFilePlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *azureFilePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -86,15 +90,11 @@ func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ vol
 }

 func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
-	var source *api.AzureFileVolumeSource
-	var readOnly bool
-	if spec.Volume != nil && spec.Volume.AzureFile != nil {
-		source = spec.Volume.AzureFile
-		readOnly = spec.Volume.AzureFile.ReadOnly
-	} else {
-		source = spec.PersistentVolume.Spec.AzureFile
-		readOnly = spec.ReadOnly
+	source, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
 	}

 	return &azureFileMounter{
 		azureFile: &azureFile{
 			volName: spec.Name(),
@@ -247,17 +247,14 @@ func (c *azureFileUnmounter) TearDownAt(dir string) error {
 	return nil
 }

-func getVolumeSource(spec *volume.Spec) (*api.AzureFileVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.AzureFileVolumeSource
-
+func getVolumeSource(
+	spec *volume.Spec) (*api.AzureFileVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.AzureFile != nil {
-		volumeSource = spec.Volume.AzureFile
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.AzureFile
-		readOnly = spec.ReadOnly
+		return spec.Volume.AzureFile, spec.Volume.AzureFile.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.AzureFile != nil {
+		return spec.PersistentVolume.Spec.AzureFile, spec.ReadOnly, nil
 	}

-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference an AzureFile volume type")
 }
@@ -37,14 +37,14 @@ func TestCanSupport(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/azure-file" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/azure-file" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{AzureFile: &api.AzureFileVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -61,7 +61,7 @@ func TestGetAccessModes(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/azure-file")
 	if err != nil {
@@ -88,7 +88,7 @@ func TestPlugin(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
 	if err != nil {
@@ -185,7 +185,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := fake.NewSimpleClientset(pv, claim)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(azureFilePluginName)

 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -211,7 +211,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/azure-file")
 	if err != nil {
@@ -54,9 +54,9 @@ func (plugin *cephfsPlugin) GetPluginName() string {
 }

 func (plugin *cephfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a CephFS volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}

 	return fmt.Sprintf("%v", volumeSource.Monitors), nil
@@ -66,6 +66,10 @@ func (plugin *cephfsPlugin) CanSupport(spec *volume.Spec) bool {
 	return (spec.Volume != nil && spec.Volume.CephFS != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.CephFS != nil)
 }

+func (plugin *cephfsPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *cephfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -75,7 +79,10 @@ func (plugin *cephfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 }

 func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
-	cephvs := plugin.getVolumeSource(spec)
+	cephvs, _, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
+	}
 	secret := ""
 	if cephvs.SecretRef != nil {
 		kubeClient := plugin.host.GetKubeClient()
@@ -97,7 +104,11 @@ func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume
 }

 func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
-	cephvs := plugin.getVolumeSource(spec)
+	cephvs, _, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
+	}
+
 	id := cephvs.User
 	if id == "" {
 		id = "admin"
@@ -143,14 +154,6 @@ func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UI
 	}, nil
 }

-func (plugin *cephfsPlugin) getVolumeSource(spec *volume.Spec) *api.CephFSVolumeSource {
-	if spec.Volume != nil && spec.Volume.CephFS != nil {
-		return spec.Volume.CephFS
-	} else {
-		return spec.PersistentVolume.Spec.CephFS
-	}
-}
-
 // CephFS volumes represent a bare host file or directory mount of an CephFS export.
 type cephfs struct {
 	volName string
@@ -289,17 +292,13 @@ func (cephfsVolume *cephfs) execMount(mountpoint string) error {
 	return nil
 }

-func getVolumeSource(spec *volume.Spec) (*api.CephFSVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.CephFSVolumeSource
-
+func getVolumeSource(spec *volume.Spec) (*api.CephFSVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.CephFS != nil {
-		volumeSource = spec.Volume.CephFS
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.CephFS
-		readOnly = spec.ReadOnly
+		return spec.Volume.CephFS, spec.Volume.CephFS.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.CephFS != nil {
+		return spec.PersistentVolume.Spec.CephFS, spec.ReadOnly, nil
 	}

-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a CephFS volume type")
 }
@@ -36,13 +36,13 @@ func TestCanSupport(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/cephfs" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/cephfs" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
 		t.Errorf("Expected false")
@@ -59,7 +59,7 @@ func TestPlugin(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/cephfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
@@ -24,7 +24,6 @@ import (
 	"time"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/util/exec"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
@@ -46,17 +45,12 @@ func (plugin *cinderPlugin) NewAttacher() (volume.Attacher, error) {
 	return &cinderDiskAttacher{host: plugin.host}, nil
 }

-func (plugin *cinderPlugin) GetDeviceName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a Cinder volume type")
-	}
-
-	return volumeSource.VolumeID, nil
-}
-
 func (attacher *cinderDiskAttacher) Attach(spec *volume.Spec, hostName string) error {
-	volumeSource, _ := getVolumeSource(spec)
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return err
+	}
+
 	volumeID := volumeSource.VolumeID

 	cloud, err := getCloudProvider(attacher.host.GetCloudProvider())
@@ -101,7 +95,12 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, timeout tim
 	if err != nil {
 		return "", err
 	}
-	volumeSource, _ := getVolumeSource(spec)
+
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
+	}
+
 	volumeID := volumeSource.VolumeID
 	instanceid, err := cloud.InstanceID()
 	if err != nil {
@@ -150,13 +149,19 @@ func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, timeout tim
 	}
 }

-func (attacher *cinderDiskAttacher) GetDeviceMountPath(spec *volume.Spec) string {
-	volumeSource, _ := getVolumeSource(spec)
-	return makeGlobalPDName(attacher.host, volumeSource.VolumeID)
+func (attacher *cinderDiskAttacher) GetDeviceMountPath(
+	spec *volume.Spec) (string, error) {
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
+	}
+
+	return makeGlobalPDName(attacher.host, volumeSource.VolumeID), nil
 }

 // FIXME: this method can be further pruned.
-func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
+func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
+	mounter := attacher.host.GetMounter()
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -169,7 +174,10 @@ func (attacher *cinderDiskAttacher) MountDevice(spec *volume.Spec, devicePath st
 		}
 	}

-	volumeSource, readOnly := getVolumeSource(spec)
+	volumeSource, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return err
+	}

 	options := []string{}
 	if readOnly {
@@ -254,7 +262,8 @@ func (detacher *cinderDiskDetacher) WaitForDetach(devicePath string, timeout tim
 	}
 }

-func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string, mounter mount.Interface) error {
+func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string) error {
+	mounter := detacher.host.GetMounter()
 	volume := path.Base(deviceMountPath)
 	if err := unmountPDAndRemoveGlobalPath(deviceMountPath, mounter); err != nil {
 		glog.Errorf("Error unmounting %q: %v", volume, err)
@@ -263,21 +272,6 @@ func (detacher *cinderDiskDetacher) UnmountDevice(deviceMountPath string, mounte
 	return nil
 }

-func getVolumeSource(spec *volume.Spec) (*api.CinderVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.CinderVolumeSource
-
-	if spec.Volume != nil && spec.Volume.Cinder != nil {
-		volumeSource = spec.Volume.Cinder
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.Cinder
-		readOnly = spec.ReadOnly
-	}
-
-	return volumeSource, readOnly
-}
-
 // Checks if the specified path exists
 func pathExists(path string) (bool, error) {
 	_, err := os.Stat(path)
@@ -79,9 +79,9 @@ func (plugin *cinderPlugin) GetPluginName() string {
 }

 func (plugin *cinderPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a Cinder volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}

 	return volumeSource.VolumeID, nil
@@ -91,6 +91,10 @@ func (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {
 	return (spec.Volume != nil && spec.Volume.Cinder != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder != nil)
 }

+func (plugin *cinderPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *cinderPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -102,16 +106,13 @@ func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume
 }

 func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
-	var cinder *api.CinderVolumeSource
-	if spec.Volume != nil && spec.Volume.Cinder != nil {
-		cinder = spec.Volume.Cinder
-	} else {
-		cinder = spec.PersistentVolume.Spec.Cinder
+	cinder, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
 	}

 	pdName := cinder.VolumeID
 	fsType := cinder.FSType
-	readOnly := cinder.ReadOnly

 	return &cinderVolumeMounter{
 		cinderVolume: &cinderVolume{
@@ -468,17 +469,13 @@ func (c *cinderVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
 	return pv, nil
 }

-func getVolumeSource(spec *volume.Spec) (*api.CinderVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.CinderVolumeSource
-
+func getVolumeSource(spec *volume.Spec) (*api.CinderVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.Cinder != nil {
-		volumeSource = spec.Volume.Cinder
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.Cinder
-		readOnly = spec.ReadOnly
+		return spec.Volume.Cinder, spec.Volume.Cinder.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.Cinder != nil {
+		return spec.PersistentVolume.Spec.Cinder, spec.ReadOnly, nil
 	}

-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a Cinder volume type")
 }
@@ -39,14 +39,14 @@ func TestCanSupport(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/cinder" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/cinder" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{Cinder: &api.CinderVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -135,7 +135,7 @@ func TestPlugin(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
 	if err != nil {
@@ -67,6 +67,10 @@ func (plugin *configMapPlugin) CanSupport(spec *volume.Spec) bool {
 	return spec.Volume != nil && spec.Volume.ConfigMap != nil
 }

+func (plugin *configMapPlugin) RequiresRemount() bool {
+	return true
+}
+
 func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 	return &configMapVolumeMounter{
 		configMapVolume: &configMapVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}},
@@ -184,7 +184,7 @@ func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.Vo
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	}

-	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
+	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins(), "" /* rootContext */)
 }

 func TestCanSupport(t *testing.T) {
@@ -196,8 +196,8 @@ func TestCanSupport(t *testing.T) {
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plugin.Name() != configMapPluginName {
-		t.Errorf("Wrong name: %s", plugin.Name())
+	if plugin.GetPluginName() != configMapPluginName {
+		t.Errorf("Wrong name: %s", plugin.GetPluginName())
 	}
 	if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{ConfigMap: &api.ConfigMapVolumeSource{LocalObjectReference: api.LocalObjectReference{Name: ""}}}}}) {
 		t.Errorf("Expected true")
@@ -76,6 +76,10 @@ func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool {
 	return spec.Volume != nil && spec.Volume.DownwardAPI != nil
 }

+func (plugin *downwardAPIPlugin) RequiresRemount() bool {
+	return true
+}
+
 func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 	v := &downwardAPIVolume{
 		volName: spec.Name(),
@@ -48,7 +48,7 @@ func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.Vo
 	if err != nil {
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	}
-	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
+	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins(), "" /* rootContext */)
 }

 func TestCanSupport(t *testing.T) {
@@ -61,8 +61,8 @@ func TestCanSupport(t *testing.T) {
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plugin.Name() != downwardAPIPluginName {
-		t.Errorf("Wrong name: %s", plugin.Name())
+	if plugin.GetPluginName() != downwardAPIPluginName {
+		t.Errorf("Wrong name: %s", plugin.GetPluginName())
 	}
 }

@@ -85,6 +85,10 @@ func (plugin *emptyDirPlugin) CanSupport(spec *volume.Spec) bool {
 	return false
 }

+func (plugin *emptyDirPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 	return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()}, opts)
 }
@@ -101,7 +105,7 @@ func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod
 		mounter:       mounter,
 		mountDetector: mountDetector,
 		plugin:        plugin,
-		rootContext:   opts.RootContext,
+		rootContext:   plugin.host.GetRootContext(),
 		MetricsProvider: volume.NewMetricsDu(getPath(pod.UID, spec.Name(), plugin.host)),
 	}, nil
 }
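The empty_dir change above explains the mechanical test churn throughout this patch: the SELinux root context is no longer threaded through volume.VolumeOptions on every mount but is served once by the VolumeHost, which is why NewFakeVolumeHost gains a fourth rootContext argument everywhere. A sketch of the new plumbing, assuming the fake host implements the same VolumeHost accessor that empty_dir now calls:

// Old: each mount carried the context in its options.
//	rootContext := opts.RootContext
// New: the host owns it; tests inject it through the fake host.
host := volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */)
rootContext := host.GetRootContext() // consumed by plugins such as empty_dir
_ = rootContext
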
@@ -33,9 +33,9 @@ import (
 )

 // Construct an instance of a plugin, by name.
-func makePluginUnderTest(t *testing.T, plugName, basePath string) volume.VolumePlugin {
+func makePluginUnderTest(t *testing.T, plugName, basePath, rootContext string) volume.VolumePlugin {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(basePath, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(basePath, nil, nil, rootContext))

 	plug, err := plugMgr.FindPluginByName(plugName)
 	if err != nil {
@@ -50,10 +50,10 @@ func TestCanSupport(t *testing.T) {
 		t.Fatalf("can't make a temp dir: %v", err)
 	}
 	defer os.RemoveAll(tmpDir)
-	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)
+	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir, "" /* rootContext */)

-	if plug.Name() != "kubernetes.io/empty-dir" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/empty-dir" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -130,7 +130,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
 		volumePath = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume")
 		metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume")

-		plug = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
+		plug = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, config.rootContext)
 		volumeName = "test-volume"
 		spec = &api.Volume{
 			Name: volumeName,
@@ -173,7 +173,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
 		pod,
 		&physicalMounter,
 		&mountDetector,
-		volume.VolumeOptions{RootContext: config.rootContext})
+		volume.VolumeOptions{})
 	if err != nil {
 		t.Errorf("Failed to make a new Mounter: %v", err)
 	}
@@ -258,13 +258,13 @@ func TestPluginBackCompat(t *testing.T) {
 	}
 	defer os.RemoveAll(basePath)

-	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath)
+	plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath, "" /* rootContext */)

 	spec := &api.Volume{
 		Name: "vol1",
 	}
 	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
+	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Failed to make a new Mounter: %v", err)
|
t.Errorf("Failed to make a new Mounter: %v", err)
|
||||||
}
|
}
|
||||||
@@ -287,13 +287,13 @@ func TestMetrics(t *testing.T) {
|
|||||||
}
|
}
|
||||||
defer os.RemoveAll(tmpDir)
|
defer os.RemoveAll(tmpDir)
|
||||||
|
|
||||||
plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir)
|
plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", tmpDir, "" /* rootContext */)
|
||||||
|
|
||||||
spec := &api.Volume{
|
spec := &api.Volume{
|
||||||
Name: "vol1",
|
Name: "vol1",
|
||||||
}
|
}
|
||||||
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
|
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
|
||||||
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
|
mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Failed to make a new Mounter: %v", err)
|
t.Errorf("Failed to make a new Mounter: %v", err)
|
||||||
}
|
}
|
||||||
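The emptyDir hunks show where the new test-host rootContext argument ends up: the SELinux root context is no longer delivered per mount through VolumeOptions (tests now pass a bare VolumeOptions{}), but is owned by the VolumeHost and fetched via GetRootContext(). A hedged sketch of the new plumbing, using hypothetical simplified types in place of the kubelet's:

package example

// VolumeHost stands in for the kubelet-side interface; this commit
// adds GetRootContext() so plugins no longer need the SELinux context
// handed to them on every mount.
type VolumeHost interface {
	GetRootContext() string
}

type emptyDirPlugin struct {
	host VolumeHost
}

type emptyDir struct {
	rootContext string
}

// newEmptyDir mirrors newMounterInternal's change: the rootContext
// field is filled from the host instead of from VolumeOptions.
func (p *emptyDirPlugin) newEmptyDir() *emptyDir {
	return &emptyDir{rootContext: p.host.GetRootContext()}
}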
@@ -56,11 +56,12 @@ func (plugin *fcPlugin) GetPluginName() string {
 }
 
 func (plugin *fcPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a FibreChannel volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}
 
+	// TargetWWNs are the FibreChannel target world wide names
 	return fmt.Sprintf("%v", volumeSource.TargetWWNs), nil
 }
 
@@ -72,6 +73,10 @@ func (plugin *fcPlugin) CanSupport(spec *volume.Spec) bool {
 	return true
 }
 
+func (plugin *fcPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *fcPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -87,14 +92,9 @@ func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.Vol
 func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
 	// fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
 	// fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
-	var readOnly bool
-	var fc *api.FCVolumeSource
-	if spec.Volume != nil && spec.Volume.FC != nil {
-		fc = spec.Volume.FC
-		readOnly = fc.ReadOnly
-	} else {
-		fc = spec.PersistentVolume.Spec.FC
-		readOnly = spec.ReadOnly
+	fc, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
 	}
 
 	if fc.Lun == nil {
@@ -207,17 +207,13 @@ func (c *fcDiskUnmounter) TearDownAt(dir string) error {
 	return diskTearDown(c.manager, *c, dir, c.mounter)
 }
 
-func getVolumeSource(spec *volume.Spec) (*api.FCVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.FCVolumeSource
-
+func getVolumeSource(spec *volume.Spec) (*api.FCVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.FC != nil {
-		volumeSource = spec.Volume.FC
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.FC
-		readOnly = spec.ReadOnly
+		return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.FC != nil {
+		return spec.PersistentVolume.Spec.FC, spec.ReadOnly, nil
 	}
 
-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type")
 }
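The getVolumeSource rewrite above is the template for the same change in the flexvolume, flocker, gce_pd, glusterfs, and host_path plugins below: instead of returning a possibly-nil source plus a readOnly flag, the helper returns (source, readOnly, error), and it now checks spec.PersistentVolume for nil before dereferencing it. A self-contained sketch of the pattern, with placeholder types standing in for the api package:

package example

import "fmt"

// Placeholder types standing in for api.FCVolumeSource and volume.Spec.
type FCVolumeSource struct {
	TargetWWNs []string
	ReadOnly   bool
}

type Volume struct{ FC *FCVolumeSource }

type PersistentVolumeSpec struct{ FC *FCVolumeSource }

type PersistentVolume struct{ Spec PersistentVolumeSpec }

type Spec struct {
	Volume           *Volume
	PersistentVolume *PersistentVolume
	ReadOnly         bool
}

// getVolumeSource mirrors the new three-value shape: callers check err
// once instead of testing the returned pointer for nil, and the
// PersistentVolume branch is nil-guarded before dereferencing.
func getVolumeSource(spec *Spec) (*FCVolumeSource, bool, error) {
	if spec.Volume != nil && spec.Volume.FC != nil {
		return spec.Volume.FC, spec.Volume.FC.ReadOnly, nil
	} else if spec.PersistentVolume != nil &&
		spec.PersistentVolume.Spec.FC != nil {
		return spec.PersistentVolume.Spec.FC, spec.ReadOnly, nil
	}
	return nil, false, fmt.Errorf("Spec does not reference a FibreChannel volume type")
}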
@@ -38,14 +38,14 @@ func TestCanSupport(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/fc" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/fc" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
 		t.Errorf("Expected false")
@@ -60,7 +60,7 @@ func TestGetAccessModes(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fc")
 	if err != nil {
@@ -131,7 +131,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	defer os.RemoveAll(tmpDir)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/fc")
 	if err != nil {
@@ -274,7 +274,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(fcPluginName)
 
 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -78,9 +78,9 @@ func (plugin *flexVolumePlugin) GetPluginName() string {
 }
 
 func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a Flex volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}
 
 	return volumeSource.Driver, nil
@@ -88,10 +88,14 @@ func (plugin *flexVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error)
 
 // CanSupport checks whether the plugin can support the input volume spec.
 func (plugin *flexVolumePlugin) CanSupport(spec *volume.Spec) bool {
-	source := plugin.getVolumeSource(spec)
+	source, _, _ := getVolumeSource(spec)
 	return (source != nil) && (source.Driver == plugin.driverName)
 }
 
+func (plugin *flexVolumePlugin) RequiresRemount() bool {
+	return false
+}
+
 // GetAccessModes gets the allowed access modes for this plugin.
 func (plugin *flexVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
@@ -100,19 +104,12 @@ func (plugin *flexVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMod
 	}
 }
 
-func (plugin *flexVolumePlugin) getVolumeSource(spec *volume.Spec) *api.FlexVolumeSource {
-	var source *api.FlexVolumeSource
-	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
-		source = spec.Volume.FlexVolume
-	} else if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.FlexVolume != nil {
-		source = spec.PersistentVolume.Spec.FlexVolume
-	}
-	return source
-}
-
 // NewMounter is the mounter routine to build the volume.
 func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
-	fv := plugin.getVolumeSource(spec)
+	fv, _, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
+	}
 	secrets := make(map[string]string)
 	if fv.SecretRef != nil {
 		kubeClient := plugin.host.GetKubeClient()
@@ -135,7 +132,11 @@ func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ vo
 
 // newMounterInternal is the internal mounter routine to build the volume.
 func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secrets map[string]string) (volume.Mounter, error) {
-	source := plugin.getVolumeSource(spec)
+	source, _, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
+	}
+
 	return &flexVolumeMounter{
 		flexVolumeDisk: &flexVolumeDisk{
 			podUID: pod.UID,
@@ -396,17 +397,13 @@ func (f *flexVolumeUnmounter) TearDownAt(dir string) error {
 	return nil
 }
 
-func getVolumeSource(spec *volume.Spec) (*api.FlexVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.FlexVolumeSource
-
+func getVolumeSource(spec *volume.Spec) (*api.FlexVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.FlexVolume != nil {
-		volumeSource = spec.Volume.FlexVolume
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.FlexVolume
-		readOnly = spec.ReadOnly
+		return spec.Volume.FlexVolume, spec.Volume.FlexVolume.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.FlexVolume != nil {
+		return spec.PersistentVolume.Spec.FlexVolume, spec.ReadOnly, nil
 	}
 
-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a Flex volume type")
}
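One design choice in the flexvolume hunk is worth a note: CanSupport deliberately discards the helper's error (source, _, _ := getVolumeSource(spec)) and keeps the nil check, because "this spec is not my volume type" is the expected outcome when the plugin manager probes every registered plugin against a spec, not a failure to report. A small self-contained sketch, again under simplified stand-in types:

package example

import "errors"

type FlexVolumeSource struct{ Driver string }

// Spec is a simplified stand-in for volume.Spec.
type Spec struct{ Flex *FlexVolumeSource }

func getVolumeSource(spec *Spec) (*FlexVolumeSource, bool, error) {
	if spec.Flex != nil {
		return spec.Flex, false, nil
	}
	return nil, false, errors.New("Spec does not reference a Flex volume type")
}

// canSupport mirrors the updated CanSupport: the error is intentionally
// dropped because a mismatched volume type is the common case during
// plugin probing, so only the nil check and driver name matter.
func canSupport(spec *Spec, driverName string) bool {
	source, _, _ := getVolumeSource(spec)
	return source != nil && source.Driver == driverName
}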
@@ -182,13 +182,13 @@ func TestCanSupport(t *testing.T) {
 
 	plugMgr := volume.VolumePluginMgr{}
 	installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
-	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost("fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost("fake", nil, nil, "" /* rootContext */))
 	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeAttacher")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plugin.Name() != "kubernetes.io/fakeAttacher" {
-		t.Errorf("Wrong name: %s", plugin.Name())
+	if plugin.GetPluginName() != "kubernetes.io/fakeAttacher" {
+		t.Errorf("Wrong name: %s", plugin.GetPluginName())
 	}
 	if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{FlexVolume: &api.FlexVolumeSource{Driver: "kubernetes.io/fakeAttacher"}}}}) {
 		t.Errorf("Expected true")
@@ -210,7 +210,7 @@ func TestGetAccessModes(t *testing.T) {
 
 	plugMgr := volume.VolumePluginMgr{}
 	installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
-	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plugin, err := plugMgr.FindPersistentPluginByName("kubernetes.io/fakeAttacher")
 	if err != nil {
@@ -233,7 +233,7 @@ func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeA
 func doTestPluginAttachDetach(t *testing.T, spec *volume.Spec, tmpDir string) {
 	plugMgr := volume.VolumePluginMgr{}
 	installPluginUnderTest(t, "kubernetes.io", "fakeAttacher", tmpDir, execScriptTempl1, nil)
-	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeAttacher")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
@@ -314,7 +314,7 @@ func doTestPluginMountUnmount(t *testing.T, spec *volume.Spec, tmpDir string) {
 
 	plugMgr := volume.VolumePluginMgr{}
 	installPluginUnderTest(t, "kubernetes.io", "fakeMounter", tmpDir, execScriptTempl2, nil)
-	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(tmpDir), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plugin, err := plugMgr.FindPluginByName("kubernetes.io/fakeMounter")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
@@ -71,9 +71,9 @@ func (p *flockerPlugin) GetPluginName() string {
 }
 
 func (p *flockerPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a Flocker volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}
 
 	return volumeSource.DatasetName, nil
@@ -84,6 +84,10 @@ func (p *flockerPlugin) CanSupport(spec *volume.Spec) bool {
 		(spec.Volume != nil && spec.Volume.Flocker != nil)
 }
 
+func (p *flockerPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*api.FlockerVolumeSource, bool) {
 	// AFAIK this will always be r/w, but perhaps for the future it will be needed
 	readOnly := false
@@ -152,7 +156,12 @@ func (b flockerMounter) newFlockerClient() (*flockerclient.Client, error) {
 	keyPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_KEY_FILE", defaultClientKeyFile)
 	certPath := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_CLIENT_CERT_FILE", defaultClientCertFile)
 
-	c, err := flockerclient.NewClient(host, port, b.flocker.pod.Status.HostIP, caCertPath, keyPath, certPath)
+	hostIP, err := b.plugin.host.GetHostIP()
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := flockerclient.NewClient(host, port, hostIP.String(), caCertPath, keyPath, certPath)
 	return c, err
 }
 
@@ -251,17 +260,13 @@ func (b flockerMounter) updateDatasetPrimary(datasetID, primaryUUID string) erro
 
 }
 
-func getVolumeSource(spec *volume.Spec) (*api.FlockerVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.FlockerVolumeSource
-
+func getVolumeSource(spec *volume.Spec) (*api.FlockerVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.Flocker != nil {
-		volumeSource = spec.Volume.Flocker
-		readOnly = spec.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.Flocker
-		readOnly = spec.ReadOnly
+		return spec.Volume.Flocker, spec.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.Flocker != nil {
+		return spec.PersistentVolume.Spec.Flocker, spec.ReadOnly, nil
 	}
 
-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a Flocker volume type")
 }
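The flockerMounter change swaps pod.Status.HostIP for a VolumeHost.GetHostIP() call, so building the Flocker client no longer depends on pod status being populated; that keeps the mount path usable from the new volume manager, which works from volume specs rather than full pod objects. A sketch of the call shape, with a trimmed stand-in interface and a hypothetical helper name:

package example

import (
	"fmt"
	"net"
)

// VolumeHost stand-in: the real interface exposes GetHostIP(), giving
// plugins the node's IP without consulting pod.Status.HostIP.
type VolumeHost interface {
	GetHostIP() (net.IP, error)
}

// controlServiceAddress is a hypothetical helper showing the new
// lookup order: ask the host first, fail fast if the node IP is
// unknown, then hand the address to the client constructor.
func controlServiceAddress(host VolumeHost, port int) (string, error) {
	hostIP, err := host.GetHostIP()
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s:%d", hostIP.String(), port), nil
}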
@@ -35,7 +35,7 @@ func newInitializedVolumePlugMgr(t *testing.T) (*volume.VolumePluginMgr, string)
 	plugMgr := &volume.VolumePluginMgr{}
 	dir, err := utiltesting.MkTmpdir("flocker")
 	assert.NoError(t, err)
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(dir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(dir, nil, nil, "" /* rootContext */))
 	return plugMgr, dir
 }
 
@@ -61,7 +61,11 @@ func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
 // Callers are responsible for thread safety between concurrent attach and
 // detach operations.
 func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, hostName string) error {
-	volumeSource, readOnly := getVolumeSource(spec)
+	volumeSource, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return err
+	}
+
 	pdName := volumeSource.PDName
 
 	attached, err := attacher.gceDisks.DiskIsAttached(pdName, hostName)
@@ -92,7 +96,11 @@ func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, time
 	timer := time.NewTimer(timeout)
 	defer timer.Stop()
 
-	volumeSource, _ := getVolumeSource(spec)
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
+	}
+
 	pdName := volumeSource.PDName
 	partition := ""
 	if volumeSource.Partition != 0 {
@@ -125,13 +133,19 @@ func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, time
 	}
 }
 
-func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath(spec *volume.Spec) string {
-	volumeSource, _ := getVolumeSource(spec)
-	return makeGlobalPDName(attacher.host, volumeSource.PDName)
+func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath(
+	spec *volume.Spec) (string, error) {
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
+	}
+
+	return makeGlobalPDName(attacher.host, volumeSource.PDName), nil
 }
 
-func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
+func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
 	// Only mount the PD globally once.
+	mounter := attacher.host.GetMounter()
 	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -144,7 +158,10 @@ func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, device
 		}
 	}
 
-	volumeSource, readOnly := getVolumeSource(spec)
+	volumeSource, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return err
+	}
 
 	options := []string{}
 	if readOnly {
@@ -163,6 +180,7 @@ func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, device
 }
 
 type gcePersistentDiskDetacher struct {
+	host     volume.VolumeHost
 	gceDisks gce.Disks
 }
 
@@ -175,6 +193,7 @@ func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
 	}
 
 	return &gcePersistentDiskDetacher{
+		host:     plugin.host,
 		gceDisks: gceCloud,
 	}, nil
 }
@@ -232,6 +251,6 @@ func (detacher *gcePersistentDiskDetacher) WaitForDetach(devicePath string, time
 	}
 }
 
-func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string, mounter mount.Interface) error {
-	return unmountPDAndRemoveGlobalPath(deviceMountPath, mounter)
+func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error {
+	return unmountPDAndRemoveGlobalPath(deviceMountPath, detacher.host.GetMounter())
 }
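Taken together, the gce_pd attacher hunks reshape the attach/detach contract in three ways: GetDeviceMountPath can now fail (it has to read the volume source), MountDevice and UnmountDevice no longer receive a mount.Interface (implementations fetch one from their VolumeHost), and the detacher therefore carries a host reference. A trimmed sketch of the resulting interfaces, using stand-in types:

package example

// Spec is a placeholder for volume.Spec.
type Spec struct{}

type Attacher interface {
	// GetDeviceMountPath now returns an error as well, since resolving
	// the path requires successfully reading the volume source.
	GetDeviceMountPath(spec *Spec) (string, error)
	// MountDevice dropped its mounter parameter; implementations call
	// their host's GetMounter() themselves.
	MountDevice(spec *Spec, devicePath string, deviceMountPath string) error
}

type Detacher interface {
	// UnmountDevice likewise sources its mounter from the VolumeHost
	// the detacher was constructed with.
	UnmountDevice(deviceMountPath string) error
}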
@@ -32,7 +32,7 @@ func TestGetDeviceName_Volume(t *testing.T) {
 	name := "my-pd-volume"
 	spec := createVSpec(name, false)
 
-	deviceName, err := plugin.GetDeviceName(spec)
+	deviceName, err := plugin.GetVolumeName(spec)
 	if err != nil {
 		t.Errorf("GetDeviceName error: %v", err)
 	}
@@ -46,7 +46,7 @@ func TestGetDeviceName_PersistentVolume(t *testing.T) {
 	name := "my-pd-pv"
 	spec := createPVSpec(name, true)
 
-	deviceName, err := plugin.GetDeviceName(spec)
+	deviceName, err := plugin.GetVolumeName(spec)
 	if err != nil {
 		t.Errorf("GetDeviceName error: %v", err)
 	}
@@ -181,7 +181,11 @@ func TestAttachDetach(t *testing.T) {
 // newPlugin creates a new gcePersistentDiskPlugin with fake cloud, NewAttacher
 // and NewDetacher won't work.
 func newPlugin() *gcePersistentDiskPlugin {
-	host := volumetest.NewFakeVolumeHost("/tmp", nil, nil)
+	host := volumetest.NewFakeVolumeHost(
+		"/tmp", /* rootDir */
+		nil,    /* kubeClient */
+		nil,    /* plugins */
+		"" /* rootContext */)
 	plugins := ProbeVolumePlugins()
 	plugin := plugins[0]
 	plugin.Init(host)
@@ -63,9 +63,9 @@ func (plugin *gcePersistentDiskPlugin) GetPluginName() string {
 }
 
 func (plugin *gcePersistentDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a GCE volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}
 
 	return volumeSource.PDName, nil
@@ -76,6 +76,10 @@ func (plugin *gcePersistentDiskPlugin) CanSupport(spec *volume.Spec) bool {
 		(spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil)
 }
 
+func (plugin *gcePersistentDiskPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *gcePersistentDiskPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -88,26 +92,35 @@ func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *api.Po
 	return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter())
 }
 
-func getVolumeSource(spec *volume.Spec) (*api.GCEPersistentDiskVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.GCEPersistentDiskVolumeSource
-
+func getVolumeSource(
+	spec *volume.Spec) (*api.GCEPersistentDiskVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
-		volumeSource = spec.Volume.GCEPersistentDisk
-		readOnly = volumeSource.ReadOnly
-		glog.V(4).Infof("volume source %v spec %v, readonly flag retrieved from source: %v", volumeSource.PDName, spec.Name(), readOnly)
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.GCEPersistentDisk
-		readOnly = spec.ReadOnly
-		glog.V(4).Infof("volume source %v spec %v, readonly flag retrieved from spec: %v", volumeSource.PDName, spec.Name(), readOnly)
+		glog.V(4).Infof(
+			"volume source %v spec %v, readonly flag retrieved from source: %v",
+			spec.Volume.GCEPersistentDisk.PDName,
+			spec.Name(),
+			spec.Volume.GCEPersistentDisk.ReadOnly)
+		return spec.Volume.GCEPersistentDisk, spec.Volume.GCEPersistentDisk.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.GCEPersistentDisk != nil {
+		glog.V(4).Infof(
+			"volume source %v spec %v, readonly flag retrieved from spec: %v",
+			spec.PersistentVolume.Spec.GCEPersistentDisk.PDName,
+			spec.Name(),
+			spec.ReadOnly)
+		return spec.PersistentVolume.Spec.GCEPersistentDisk, spec.ReadOnly, nil
 	}
 
-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a GCE volume type")
 }
 
 func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) {
 	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
 	// GCEPDs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
-	volumeSource, readOnly := getVolumeSource(spec)
+	volumeSource, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
+	}
+
 	pdName := volumeSource.PDName
 	partition := ""
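A small observability detail in the GCE getVolumeSource rewrite: the resolved readonly flag is logged through glog at verbosity 4, so the message is silent in normal operation and appears only when the kubelet runs at --v=4 or higher. The call shape, isolated for clarity:

package example

import "github.com/golang/glog"

// logReadOnlyResolution isolates the V(4) logging used in the hunk
// above; glog.V(n) returns a guard that discards the message unless
// the process was started at verbosity n or higher.
func logReadOnlyResolution(pdName, specName string, readOnly bool) {
	glog.V(4).Infof(
		"volume source %v spec %v, readonly flag retrieved from source: %v",
		pdName, specName, readOnly)
}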
@@ -39,14 +39,14 @@ func TestCanSupport(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/gce-pd" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/gce-pd" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -63,7 +63,7 @@ func TestGetAccessModes(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/gce-pd")
 	if err != nil {
@@ -106,7 +106,7 @@ func TestPlugin(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
 	if err != nil {
@@ -248,7 +248,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	}
 	defer os.RemoveAll(tmpDir)
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName)
 
 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -75,6 +75,10 @@ func (plugin *gitRepoPlugin) CanSupport(spec *volume.Spec) bool {
 	return spec.Volume != nil && spec.Volume.GitRepo != nil
 }
 
+func (plugin *gitRepoPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 	return &gitRepoVolumeMounter{
 		gitRepoVolume: &gitRepoVolume{
@@ -38,7 +38,7 @@ func newTestHost(t *testing.T) (string, volume.VolumeHost) {
 	if err != nil {
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	}
-	return tempDir, volumetest.NewFakeVolumeHost(tempDir, nil, empty_dir.ProbeVolumePlugins())
+	return tempDir, volumetest.NewFakeVolumeHost(tempDir, nil, empty_dir.ProbeVolumePlugins(), "" /* rootContext */)
 }
 
 func TestCanSupport(t *testing.T) {
@@ -50,8 +50,8 @@ func TestCanSupport(t *testing.T) {
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/git-repo" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/git-repo" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{GitRepo: &api.GitRepoVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -230,7 +230,7 @@ func doTestPlugin(scenario struct {
 		return allErrs
 	}
 	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(scenario.vol), pod, volume.VolumeOptions{RootContext: ""})
+	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(scenario.vol), pod, volume.VolumeOptions{})
 
 	if err != nil {
 		allErrs = append(allErrs,
@@ -57,9 +57,9 @@ func (plugin *glusterfsPlugin) GetPluginName() string {
 }
 
 func (plugin *glusterfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a Gluster volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}
 
 	return fmt.Sprintf(
@@ -75,7 +75,10 @@ func (plugin *glusterfsPlugin) CanSupport(spec *volume.Spec) bool {
 	}
 
 	return true
+}
+
+func (plugin *glusterfsPlugin) RequiresRemount() bool {
+	return false
 }
 
 func (plugin *glusterfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
@@ -288,17 +291,14 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
 	return fmt.Errorf("glusterfs: mount failed: %v", errs)
 }
 
-func getVolumeSource(spec *volume.Spec) (*api.GlusterfsVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.GlusterfsVolumeSource
-
+func getVolumeSource(
+	spec *volume.Spec) (*api.GlusterfsVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.Glusterfs != nil {
-		volumeSource = spec.Volume.Glusterfs
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.Glusterfs
-		readOnly = spec.ReadOnly
+		return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.Glusterfs != nil {
+		return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly, nil
 	}
 
-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a Gluster volume type")
 }
@@ -39,13 +39,13 @@ func TestCanSupport(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/glusterfs" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/glusterfs" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if plug.CanSupport(&volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{}}}}) {
 		t.Errorf("Expected false")
@@ -63,7 +63,7 @@ func TestGetAccessModes(t *testing.T) {
 	defer os.RemoveAll(tmpDir)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs")
 	if err != nil {
@@ -91,7 +91,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	defer os.RemoveAll(tmpDir)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
@@ -223,7 +223,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := fake.NewSimpleClientset(pv, claim, ep)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
 
 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -83,9 +83,9 @@ func (plugin *hostPathPlugin) GetPluginName() string {
 }
 
 func (plugin *hostPathPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference an HostPath volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}
 
 	return volumeSource.Path, nil
@@ -96,6 +96,10 @@ func (plugin *hostPathPlugin) CanSupport(spec *volume.Spec) bool {
 		(spec.Volume != nil && spec.Volume.HostPath != nil)
 }
 
+func (plugin *hostPathPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *hostPathPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -103,19 +107,14 @@ func (plugin *hostPathPlugin) GetAccessModes() []api.PersistentVolumeAccessMode
 }
 
 func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
-	if spec.Volume != nil && spec.Volume.HostPath != nil {
-		path := spec.Volume.HostPath.Path
-		return &hostPathMounter{
-			hostPath: &hostPath{path: path},
-			readOnly: false,
-		}, nil
-	} else {
-		path := spec.PersistentVolume.Spec.HostPath.Path
-		return &hostPathMounter{
-			hostPath: &hostPath{path: path},
-			readOnly: spec.ReadOnly,
-		}, nil
+	hostPathVolumeSource, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
 	}
+	return &hostPathMounter{
+		hostPath: &hostPath{path: hostPathVolumeSource.Path},
+		readOnly: readOnly,
+	}, nil
 }
 
 func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
@@ -313,17 +312,14 @@ func (r *hostPathDeleter) Delete() error {
 	return os.RemoveAll(r.GetPath())
 }
 
-func getVolumeSource(spec *volume.Spec) (*api.HostPathVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.HostPathVolumeSource
-
+func getVolumeSource(
+	spec *volume.Spec) (*api.HostPathVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.HostPath != nil {
-		volumeSource = spec.Volume.HostPath
-		readOnly = spec.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.HostPath
-		readOnly = spec.ReadOnly
+		return spec.Volume.HostPath, spec.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.HostPath != nil {
+		return spec.PersistentVolume.Spec.HostPath, spec.ReadOnly, nil
 	}
 
-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference an HostPath volume type")
 }
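The hostPath NewMounter rewrite is more than deduplication: the old inline-volume branch pinned readOnly to false, while the shared getVolumeSource returns spec.ReadOnly for inline volumes and PersistentVolumes alike, so both paths now honor the spec's flag. A self-contained sketch of the consolidated flow, with placeholder types and field names standing in for volume.Spec:

package example

import "errors"

// Placeholder types standing in for api.HostPathVolumeSource and
// volume.Spec; the two pointer fields model the inline and PV-backed
// branches of the real spec.
type HostPathVolumeSource struct{ Path string }

type Spec struct {
	HostPath   *HostPathVolumeSource // inline pod volume
	PVHostPath *HostPathVolumeSource // via PersistentVolume
	ReadOnly   bool
}

func getVolumeSource(spec *Spec) (*HostPathVolumeSource, bool, error) {
	if spec.HostPath != nil {
		return spec.HostPath, spec.ReadOnly, nil
	} else if spec.PVHostPath != nil {
		return spec.PVHostPath, spec.ReadOnly, nil
	}
	return nil, false, errors.New("Spec does not reference an HostPath volume type")
}

type hostPathMounter struct {
	path     string
	readOnly bool
}

// newMounter collapses the two old branches; note that both now take
// spec.ReadOnly via getVolumeSource.
func newMounter(spec *Spec) (*hostPathMounter, error) {
	source, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	return &hostPathMounter{path: source.Path, readOnly: readOnly}, nil
}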
@@ -34,14 +34,14 @@ import (
 
 func TestCanSupport(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/host-path" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/host-path" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -56,7 +56,7 @@ func TestCanSupport(t *testing.T) {
 
 func TestGetAccessModes(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/host-path")
 	if err != nil {
@@ -69,7 +69,7 @@ func TestGetAccessModes(t *testing.T) {
 
 func TestRecycler(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	pluginHost := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
+	pluginHost := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)
 	plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volumetest.NewFakeRecycler, nil, nil, volume.VolumeConfig{}}}, pluginHost)
 
 	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/foo"}}}}}
@@ -99,7 +99,7 @@ func TestDeleter(t *testing.T) {
 	}
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */))
 
 	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
 	plug, err := plugMgr.FindDeletablePluginBySpec(spec)
@@ -133,7 +133,7 @@ func TestDeleterTempDir(t *testing.T) {
 
 	for name, test := range tests {
 		plugMgr := volume.VolumePluginMgr{}
-		plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
+		plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */))
 		spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: test.path}}}}}
 		plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
 		deleter, _ := plug.NewDeleter(spec)
@@ -153,7 +153,7 @@ func TestProvisioner(t *testing.T) {
 	err := os.MkdirAll(tempPath, 0750)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */))
 	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: tempPath}}}}}
 	plug, err := plugMgr.FindCreatablePluginBySpec(spec)
 	if err != nil {
@@ -187,7 +187,7 @@ func TestProvisioner(t *testing.T) {
 
 func TestPlugin(t *testing.T) {
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("fake", nil, nil, "" /* rootContext */))
 
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
 	if err != nil {
@@ -259,7 +259,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := fake.NewSimpleClientset(pv, claim)
 
 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil, "" /* rootContext */))
	plug, _ := plugMgr.FindPluginByName(hostPathPluginName)

	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
|
||||||
|
|||||||
@@ -58,9 +58,9 @@ func (plugin *iscsiPlugin) GetPluginName() string {
 }

 func (plugin *iscsiPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a ISCSI volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}

 	return fmt.Sprintf(
@@ -78,6 +78,10 @@ func (plugin *iscsiPlugin) CanSupport(spec *volume.Spec) bool {
 	return true
 }

+func (plugin *iscsiPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *iscsiPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -93,14 +97,9 @@ func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.
 func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
 	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
 	// iscsi volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
-	var readOnly bool
-	var iscsi *api.ISCSIVolumeSource
-	if spec.Volume != nil && spec.Volume.ISCSI != nil {
-		iscsi = spec.Volume.ISCSI
-		readOnly = iscsi.ReadOnly
-	} else {
-		iscsi = spec.PersistentVolume.Spec.ISCSI
-		readOnly = spec.ReadOnly
+	iscsi, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
 	}

 	lun := strconv.Itoa(int(iscsi.Lun))
@@ -221,17 +220,13 @@ func portalMounter(portal string) string {
 	return portal
 }

-func getVolumeSource(spec *volume.Spec) (*api.ISCSIVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.ISCSIVolumeSource
-
+func getVolumeSource(spec *volume.Spec) (*api.ISCSIVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.ISCSI != nil {
-		volumeSource = spec.Volume.ISCSI
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.ISCSI
-		readOnly = spec.ReadOnly
+		return spec.Volume.ISCSI, spec.Volume.ISCSI.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.ISCSI != nil {
+		return spec.PersistentVolume.Spec.ISCSI, spec.ReadOnly, nil
 	}

-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a ISCSI volume type")
 }
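The same getVolumeSource refactor recurs in the NFS and RBD plugins below: the helper now returns an explicit error instead of a nil source, and the PersistentVolume branch is guarded with nil checks instead of being a bare else. A minimal, self-contained sketch of the pattern, using local stand-in types rather than the real api and volume packages (all names below are illustrative only):

    package main

    import (
        "errors"
        "fmt"
    )

    // Stand-ins for api.ISCSIVolumeSource and volume.Spec.
    type iscsiSource struct {
        TargetPortal string
        ReadOnly     bool
    }

    type spec struct {
        directSource     *iscsiSource // set when the pod references the volume inline
        persistentSource *iscsiSource // set when it comes from a PersistentVolume
        specReadOnly     bool         // ReadOnly resolved from the claim binding
    }

    // getVolumeSource mirrors the refactored helper: it returns the source,
    // the effective read-only flag, and an error instead of a nil source.
    func getVolumeSource(s *spec) (*iscsiSource, bool, error) {
        if s.directSource != nil {
            return s.directSource, s.directSource.ReadOnly, nil
        } else if s.persistentSource != nil {
            return s.persistentSource, s.specReadOnly, nil
        }
        return nil, false, errors.New("spec does not reference an iSCSI volume type")
    }

    func main() {
        src, readOnly, err := getVolumeSource(&spec{directSource: &iscsiSource{TargetPortal: "10.0.0.1:3260"}})
        if err != nil {
            fmt.Println("error:", err)
            return
        }
        fmt.Println(src.TargetPortal, readOnly)
    }

The three-value return lets every caller (GetVolumeName, newMounterInternal) propagate one precise error instead of re-deriving "wrong volume type" from a nil pointer.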
@@ -38,14 +38,14 @@ func TestCanSupport(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/iscsi")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/iscsi" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/iscsi" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
 		t.Errorf("Expected false")
@@ -60,7 +60,7 @@ func TestGetAccessModes(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/iscsi")
 	if err != nil {
@@ -131,7 +131,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/iscsi")
 	if err != nil {
@@ -274,7 +274,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := fake.NewSimpleClientset(pv, claim)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(iscsiPluginName)

 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -68,9 +68,9 @@ func (plugin *nfsPlugin) GetPluginName() string {
 }

 func (plugin *nfsPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a NFS volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}

 	return fmt.Sprintf(
@@ -84,6 +84,10 @@ func (plugin *nfsPlugin) CanSupport(spec *volume.Spec) bool {
 		(spec.Volume != nil && spec.Volume.NFS != nil)
 }

+func (plugin *nfsPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *nfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -97,15 +101,11 @@ func (plugin *nfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.Vo
 }

 func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Mounter, error) {
-	var source *api.NFSVolumeSource
-	var readOnly bool
-	if spec.Volume != nil && spec.Volume.NFS != nil {
-		source = spec.Volume.NFS
-		readOnly = spec.Volume.NFS.ReadOnly
-	} else {
-		source = spec.PersistentVolume.Spec.NFS
-		readOnly = spec.ReadOnly
+	source, readOnly, err := getVolumeSource(spec)
+	if err != nil {
+		return nil, err
 	}

 	return &nfsMounter{
 		nfs: &nfs{
 			volName: spec.Name(),
@@ -309,17 +309,13 @@ func (r *nfsRecycler) Recycle() error {
 	return volume.RecycleVolumeByWatchingPodUntilCompletion(r.pvName, pod, r.host.GetKubeClient())
 }

-func getVolumeSource(spec *volume.Spec) (*api.NFSVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.NFSVolumeSource
-
+func getVolumeSource(spec *volume.Spec) (*api.NFSVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.NFS != nil {
-		volumeSource = spec.Volume.NFS
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.NFS
-		readOnly = spec.ReadOnly
+		return spec.Volume.NFS, spec.Volume.NFS.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.NFS != nil {
+		return spec.PersistentVolume.Spec.NFS, spec.ReadOnly, nil
 	}

-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a NFS volume type")
 }
@@ -38,13 +38,13 @@ func TestCanSupport(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/nfs" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/nfs" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{NFS: &api.NFSVolumeSource{}}}}) {
 		t.Errorf("Expected true")
@@ -65,7 +65,7 @@ func TestGetAccessModes(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/nfs")
 	if err != nil {
@@ -84,7 +84,7 @@ func TestRecycler(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins([]volume.VolumePlugin{&nfsPlugin{nil, newMockRecycler, volume.VolumeConfig{}}}, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins([]volume.VolumePlugin{&nfsPlugin{nil, newMockRecycler, volume.VolumeConfig{}}}, volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{NFS: &api.NFSVolumeSource{Path: "/foo"}}}}}
 	plug, err := plugMgr.FindRecyclablePluginBySpec(spec)
@@ -141,7 +141,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 	plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
@@ -269,7 +269,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := fake.NewSimpleClientset(pv, claim)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(nfsPluginName)

 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -18,6 +18,7 @@ package volume

 import (
 	"fmt"
+	"net"
 	"strings"
 	"sync"

@@ -35,12 +36,6 @@ import (

 // VolumeOptions contains option information about a volume.
 type VolumeOptions struct {
-	// The rootcontext to use when performing mounts for a volume. This is a
-	// temporary measure in order to set the rootContext of tmpfs mounts
-	// correctly. it will be replaced and expanded on by future
-	// SecurityContext work.
-	RootContext string
-
 	// The attributes below are required by volume.Provisioner
 	// TODO: refactor all of this out of volumes when an admin can configure
 	// many kinds of provisioners.
@@ -86,6 +81,11 @@ type VolumePlugin interface {
 	// const.
 	CanSupport(spec *Spec) bool

+	// RequiresRemount returns true if this plugin requires mount calls to be
+	// reexecuted. Atomically updating volumes, like Downward API, depend on
+	// this to update the contents of the volume.
+	RequiresRemount() bool
+
 	// NewMounter creates a new volume.Mounter from an API specification.
 	// Ownership of the spec pointer in *not* transferred.
 	// - spec: The api.Volume spec
@@ -196,6 +196,15 @@ type VolumeHost interface {

 	// Returns the hostname of the host kubelet is running on
 	GetHostName() string
+
+	// Returns host IP or nil in the case of error.
+	GetHostIP() (net.IP, error)
+
+	// Returns the rootcontext to use when performing mounts for a volume.
+	// This is a temporary measure in order to set the rootContext of tmpfs
+	// mounts correctly. It will be replaced and expanded on by future
+	// SecurityContext work.
+	GetRootContext() string
 }

 // VolumePluginMgr tracks registered plugins.
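The two interface additions above put new obligations on every plugin and host implementation: RequiresRemount moves onto VolumePlugin, and the rootContext setting moves from VolumeOptions onto VolumeHost (alongside a new GetHostIP). A minimal, self-contained sketch of what conforming implementations look like, using local stand-ins for the relevant slices of VolumePlugin and VolumeHost rather than the real interfaces:

    package main

    import (
        "fmt"
        "net"
    )

    // Stand-in for the RequiresRemount slice of volume.VolumePlugin.
    type volumePlugin interface {
        GetPluginName() string
        // RequiresRemount reports whether mount calls must be re-executed
        // periodically (true for atomically updated volumes).
        RequiresRemount() bool
    }

    // Stand-in for the two methods added to volume.VolumeHost.
    type volumeHost interface {
        GetHostIP() (net.IP, error)
        // GetRootContext supplies the SELinux root context for tmpfs mounts;
        // per the commit, a temporary measure pending SecurityContext work.
        GetRootContext() string
    }

    type staticPlugin struct{ name string }

    func (p *staticPlugin) GetPluginName() string { return p.name }
    func (p *staticPlugin) RequiresRemount() bool { return false } // contents never change after SetUp

    type localhostHost struct{ rootContext string }

    func (h *localhostHost) GetHostIP() (net.IP, error) { return net.ParseIP("127.0.0.1"), nil }
    func (h *localhostHost) GetRootContext() string     { return h.rootContext }

    func main() {
        var p volumePlugin = &staticPlugin{name: "example.com/static"}
        var h volumeHost = &localhostHost{}
        ip, _ := h.GetHostIP()
        fmt.Println(p.GetPluginName(), p.RequiresRemount(), ip, h.GetRootContext())
    }

Moving RootContext onto the host is what allows the per-volume VolumeOptions struct to stay free of node-wide settings.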
@@ -55,9 +55,9 @@ func (plugin *rbdPlugin) GetPluginName() string {
 }

 func (plugin *rbdPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-	volumeSource, _ := getVolumeSource(spec)
-	if volumeSource == nil {
-		return "", fmt.Errorf("Spec does not reference a RBD volume type")
+	volumeSource, _, err := getVolumeSource(spec)
+	if err != nil {
+		return "", err
 	}

 	return fmt.Sprintf(
@@ -74,6 +74,10 @@ func (plugin *rbdPlugin) CanSupport(spec *volume.Spec) bool {
 	return true
 }

+func (plugin *rbdPlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *rbdPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 	return []api.PersistentVolumeAccessMode{
 		api.ReadWriteOnce,
@@ -234,17 +238,14 @@ func (plugin *rbdPlugin) execCommand(command string, args []string) ([]byte, err
 	return cmd.CombinedOutput()
 }

-func getVolumeSource(spec *volume.Spec) (*api.RBDVolumeSource, bool) {
-	var readOnly bool
-	var volumeSource *api.RBDVolumeSource
-
+func getVolumeSource(
+	spec *volume.Spec) (*api.RBDVolumeSource, bool, error) {
 	if spec.Volume != nil && spec.Volume.RBD != nil {
-		volumeSource = spec.Volume.RBD
-		readOnly = volumeSource.ReadOnly
-	} else {
-		volumeSource = spec.PersistentVolume.Spec.RBD
-		readOnly = spec.ReadOnly
+		return spec.Volume.RBD, spec.Volume.RBD.ReadOnly, nil
+	} else if spec.PersistentVolume != nil &&
+		spec.PersistentVolume.Spec.RBD != nil {
+		return spec.PersistentVolume.Spec.RBD, spec.ReadOnly, nil
 	}

-	return volumeSource, readOnly
+	return nil, false, fmt.Errorf("Spec does not reference a RBD volume type")
 }
@@ -38,14 +38,14 @@ func TestCanSupport(t *testing.T) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plug.Name() != "kubernetes.io/rbd" {
-		t.Errorf("Wrong name: %s", plug.Name())
+	if plug.GetPluginName() != "kubernetes.io/rbd" {
+		t.Errorf("Wrong name: %s", plug.GetPluginName())
 	}
 	if plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{}}}) {
 		t.Errorf("Expected false")
@@ -95,7 +95,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
 	defer os.RemoveAll(tmpDir)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))

 	plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
 	if err != nil {
@@ -226,7 +226,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 	client := fake.NewSimpleClientset(pv, claim)

 	plugMgr := volume.VolumePluginMgr{}
-	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil, "" /* rootContext */))
 	plug, _ := plugMgr.FindPluginByName(rbdPluginName)

 	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@@ -75,6 +75,10 @@ func (plugin *secretPlugin) CanSupport(spec *volume.Spec) bool {
 	return spec.Volume != nil && spec.Volume.Secret != nil
 }

+func (plugin *secretPlugin) RequiresRemount() bool {
+	return true
+}
+
 func (plugin *secretPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 	return &secretVolumeMounter{
 		secretVolume: &secretVolume{
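Note that the secret plugin, unlike the disk-backed plugins above, answers true: secret contents can change after the initial SetUp, so the volume manager must re-run mounts to refresh them. A rough, self-contained sketch of how a sync loop could consume the flag (the reconcile function and types here are hypothetical, not the actual volume manager code):

    package main

    import "fmt"

    type mountedVolume struct {
        name            string
        requiresRemount bool // taken from the plugin's RequiresRemount()
    }

    // remountIfNeeded re-runs SetUp only for plugins that ask for it, so
    // atomically updated volumes (secrets, downward API) pick up new contents
    // while static volumes are left alone.
    func remountIfNeeded(volumes []mountedVolume) {
        for _, v := range volumes {
            if v.requiresRemount {
                fmt.Printf("re-running SetUp for %s\n", v.name)
            }
        }
    }

    func main() {
        remountIfNeeded([]mountedVolume{
            {name: "kubernetes.io/secret/creds", requiresRemount: true},
            {name: "kubernetes.io/host-path/data", requiresRemount: false},
        })
    }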
@@ -187,7 +187,7 @@ func newTestHost(t *testing.T, clientset clientset.Interface) (string, volume.Vo
 		t.Fatalf("can't make a temp rootdir: %v", err)
 	}

-	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins())
+	return tempDir, volumetest.NewFakeVolumeHost(tempDir, clientset, empty_dir.ProbeVolumePlugins(), "" /* rootContext */)
 }

 func TestCanSupport(t *testing.T) {
@@ -199,8 +199,8 @@ func TestCanSupport(t *testing.T) {
 	if err != nil {
 		t.Errorf("Can't find the plugin by name")
 	}
-	if plugin.Name() != secretPluginName {
-		t.Errorf("Wrong name: %s", plugin.Name())
+	if plugin.GetPluginName() != secretPluginName {
+		t.Errorf("Wrong name: %s", plugin.GetPluginName())
 	}
 	if !plugin.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{Secret: &api.SecretVolumeSource{SecretName: ""}}}}) {
 		t.Errorf("Expected true")
@@ -18,11 +18,13 @@ package testing

 import (
 	"fmt"
+	"net"
 	"os"
 	"os/exec"
 	"path"
 	"strings"
 	"sync"
+	"testing"
 	"time"

 	"k8s.io/kubernetes/pkg/api"
@@ -40,16 +42,17 @@ import (

 // fakeVolumeHost is useful for testing volume plugins.
 type fakeVolumeHost struct {
 	rootDir     string
 	kubeClient  clientset.Interface
 	pluginMgr   VolumePluginMgr
 	cloud       cloudprovider.Interface
 	mounter     mount.Interface
 	writer      io.Writer
+	rootContext string
 }

-func NewFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeVolumeHost {
-	host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: nil}
+func NewFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, rootContext string) *fakeVolumeHost {
+	host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: nil, rootContext: rootContext}
 	host.mounter = &mount.FakeMounter{}
 	host.writer = &io.StdWriter{}
 	host.pluginMgr.InitPlugins(plugins, host)
@@ -115,6 +118,15 @@ func (f *fakeVolumeHost) GetHostName() string {
 	return "fakeHostName"
 }

+// Returns host IP or nil in the case of error.
+func (f *fakeVolumeHost) GetHostIP() (net.IP, error) {
+	return nil, fmt.Errorf("GetHostIP() not implemented")
+}
+
+func (f *fakeVolumeHost) GetRootContext() string {
+	return f.rootContext
+}
+
 func ProbeVolumePlugins(config VolumeConfig) []VolumePlugin {
 	if _, ok := config.OtherAttributes["fake-property"]; ok {
 		return []VolumePlugin{
@@ -181,6 +193,10 @@ func (plugin *FakeVolumePlugin) CanSupport(spec *Spec) bool {
 	return true
 }

+func (plugin *FakeVolumePlugin) RequiresRemount() bool {
+	return false
+}
+
 func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) {
 	plugin.Lock()
 	defer plugin.Unlock()
@@ -192,6 +208,12 @@ func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *api.Pod, opts Volume
 	return volume, nil
 }

+func (plugin *FakeVolumePlugin) GetMounters() (Mounters []*FakeVolume) {
+	plugin.RLock()
+	defer plugin.RUnlock()
+	return plugin.Mounters
+}
+
 func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (Unmounter, error) {
 	plugin.Lock()
 	defer plugin.Unlock()
@@ -203,6 +225,12 @@ func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (
 	return volume, nil
 }

+func (plugin *FakeVolumePlugin) GetUnmounters() (Unmounters []*FakeVolume) {
+	plugin.RLock()
+	defer plugin.RUnlock()
+	return plugin.Unmounters
+}
+
 func (plugin *FakeVolumePlugin) NewAttacher() (Attacher, error) {
 	plugin.Lock()
 	defer plugin.Unlock()
@@ -293,6 +321,12 @@ func (fv *FakeVolume) SetUp(fsGroup *int64) error {
 	return fv.SetUpAt(fv.getPath(), fsGroup)
 }

+func (fv *FakeVolume) GetSetUpCallCount() int {
+	fv.RLock()
+	defer fv.RUnlock()
+	return fv.SetUpCallCount
+}
+
 func (fv *FakeVolume) SetUpAt(dir string, fsGroup *int64) error {
 	return os.MkdirAll(dir, 0750)
 }
@@ -314,6 +348,12 @@ func (fv *FakeVolume) TearDown() error {
 	return fv.TearDownAt(fv.getPath())
 }

+func (fv *FakeVolume) GetTearDownCallCount() int {
+	fv.RLock()
+	defer fv.RUnlock()
+	return fv.TearDownCallCount
+}
+
 func (fv *FakeVolume) TearDownAt(dir string) error {
 	return os.RemoveAll(dir)
 }
@@ -338,20 +378,32 @@ func (fv *FakeVolume) WaitForAttach(spec *Spec, spectimeout time.Duration) (stri
 	return "", nil
 }

-func (fv *FakeVolume) GetDeviceMountPath(spec *Spec) string {
+func (fv *FakeVolume) GetWaitForAttachCallCount() int {
+	fv.RLock()
+	defer fv.RUnlock()
+	return fv.WaitForAttachCallCount
+}
+
+func (fv *FakeVolume) GetDeviceMountPath(spec *Spec) (string, error) {
 	fv.Lock()
 	defer fv.Unlock()
 	fv.GetDeviceMountPathCallCount++
-	return ""
+	return "", nil
 }

-func (fv *FakeVolume) MountDevice(spec *Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error {
+func (fv *FakeVolume) MountDevice(spec *Spec, devicePath string, deviceMountPath string) error {
 	fv.Lock()
 	defer fv.Unlock()
 	fv.MountDeviceCallCount++
 	return nil
 }

+func (fv *FakeVolume) GetMountDeviceCallCount() int {
+	fv.RLock()
+	defer fv.RUnlock()
+	return fv.MountDeviceCallCount
+}
+
 func (fv *FakeVolume) Detach(deviceMountPath string, hostName string) error {
 	fv.Lock()
 	defer fv.Unlock()
@@ -372,7 +424,7 @@ func (fv *FakeVolume) WaitForDetach(devicePath string, timeout time.Duration) er
 	return nil
 }

-func (fv *FakeVolume) UnmountDevice(globalMountPath string, mounter mount.Interface) error {
+func (fv *FakeVolume) UnmountDevice(globalMountPath string) error {
 	fv.Lock()
 	defer fv.Unlock()
 	fv.UnmountDeviceCallCount++
@@ -466,3 +518,212 @@ func FindEmptyDirectoryUsageOnTmpfs() (*resource.Quantity, error) {
 	used.Format = resource.BinarySI
 	return &used, nil
 }
+
+// VerifyAttachCallCount ensures that at least one of the Attachers for this
+// plugin has the expectedAttachCallCount number of calls. Otherwise it returns
+// an error.
+func VerifyAttachCallCount(
+	expectedAttachCallCount int,
+	fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, attacher := range fakeVolumePlugin.GetAttachers() {
+		actualCallCount := attacher.GetAttachCallCount()
+		if actualCallCount == expectedAttachCallCount {
+			return nil
+		}
+	}
+
+	return fmt.Errorf(
+		"No attachers have expected AttachCallCount. Expected: <%v>.",
+		expectedAttachCallCount)
+}
+
+// VerifyZeroAttachCalls ensures that all of the Attachers for this plugin have
+// a zero AttachCallCount. Otherwise it returns an error.
+func VerifyZeroAttachCalls(fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, attacher := range fakeVolumePlugin.GetAttachers() {
+		actualCallCount := attacher.GetAttachCallCount()
+		if actualCallCount != 0 {
+			return fmt.Errorf(
+				"At least one attacher has non-zero AttachCallCount: <%v>.",
+				actualCallCount)
+		}
+	}
+
+	return nil
+}
+
+// VerifyWaitForAttachCallCount ensures that at least one of the Attachers for
+// this plugin has the expectedWaitForAttachCallCount number of calls. Otherwise
+// it returns an error.
+func VerifyWaitForAttachCallCount(
+	expectedWaitForAttachCallCount int,
+	fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, attacher := range fakeVolumePlugin.GetAttachers() {
+		actualCallCount := attacher.GetWaitForAttachCallCount()
+		if actualCallCount == expectedWaitForAttachCallCount {
+			return nil
+		}
+	}
+
+	return fmt.Errorf(
+		"No Attachers have expected WaitForAttachCallCount. Expected: <%v>.",
+		expectedWaitForAttachCallCount)
+}
+
+// VerifyZeroWaitForAttachCallCount ensures that all Attachers for this plugin
+// have a zero WaitForAttachCallCount. Otherwise it returns an error.
+func VerifyZeroWaitForAttachCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, attacher := range fakeVolumePlugin.GetAttachers() {
+		actualCallCount := attacher.GetWaitForAttachCallCount()
+		if actualCallCount != 0 {
+			return fmt.Errorf(
+				"At least one attacher has non-zero WaitForAttachCallCount: <%v>.",
+				actualCallCount)
+		}
+	}
+
+	return nil
+}
+
+// VerifyMountDeviceCallCount ensures that at least one of the Attachers for
+// this plugin has the expectedMountDeviceCallCount number of calls. Otherwise
+// it returns an error.
+func VerifyMountDeviceCallCount(
+	expectedMountDeviceCallCount int,
+	fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, attacher := range fakeVolumePlugin.GetAttachers() {
+		actualCallCount := attacher.GetMountDeviceCallCount()
+		if actualCallCount == expectedMountDeviceCallCount {
+			return nil
+		}
+	}
+
+	return fmt.Errorf(
+		"No Attachers have expected MountDeviceCallCount. Expected: <%v>.",
+		expectedMountDeviceCallCount)
+}
+
+// VerifyZeroMountDeviceCallCount ensures that all Attachers for this plugin
+// have a zero MountDeviceCallCount. Otherwise it returns an error.
+func VerifyZeroMountDeviceCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, attacher := range fakeVolumePlugin.GetAttachers() {
+		actualCallCount := attacher.GetMountDeviceCallCount()
+		if actualCallCount != 0 {
+			return fmt.Errorf(
+				"At least one attacher has non-zero MountDeviceCallCount: <%v>.",
+				actualCallCount)
+		}
+	}
+
+	return nil
+}
+
+// VerifySetUpCallCount ensures that at least one of the Mounters for this
+// plugin has the expectedSetUpCallCount number of calls. Otherwise it returns
+// an error.
+func VerifySetUpCallCount(
+	expectedSetUpCallCount int,
+	fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, mounter := range fakeVolumePlugin.GetMounters() {
+		actualCallCount := mounter.GetSetUpCallCount()
+		if actualCallCount >= expectedSetUpCallCount {
+			return nil
+		}
+	}
+
+	return fmt.Errorf(
+		"No Mounters have expected SetUpCallCount. Expected: <%v>.",
+		expectedSetUpCallCount)
+}
+
+// VerifyZeroSetUpCallCount ensures that all Mounters for this plugin have a
+// zero SetUpCallCount. Otherwise it returns an error.
+func VerifyZeroSetUpCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, mounter := range fakeVolumePlugin.GetMounters() {
+		actualCallCount := mounter.GetSetUpCallCount()
+		if actualCallCount != 0 {
+			return fmt.Errorf(
+				"At least one mounter has non-zero SetUpCallCount: <%v>.",
+				actualCallCount)
+		}
+	}
+
+	return nil
+}
+
+// VerifyTearDownCallCount ensures that at least one of the Unmounters for this
+// plugin has the expectedTearDownCallCount number of calls. Otherwise it
+// returns an error.
+func VerifyTearDownCallCount(
+	expectedTearDownCallCount int,
+	fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, unmounter := range fakeVolumePlugin.GetUnmounters() {
+		actualCallCount := unmounter.GetTearDownCallCount()
+		if actualCallCount >= expectedTearDownCallCount {
+			return nil
+		}
+	}
+
+	return fmt.Errorf(
+		"No Unmounters have expected TearDownCallCount. Expected: <%v>.",
+		expectedTearDownCallCount)
+}
+
+// VerifyZeroTearDownCallCount ensures that all Mounters for this plugin have a
+// zero TearDownCallCount. Otherwise it returns an error.
+func VerifyZeroTearDownCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, mounter := range fakeVolumePlugin.GetMounters() {
+		actualCallCount := mounter.GetTearDownCallCount()
+		if actualCallCount != 0 {
+			return fmt.Errorf(
+				"At least one mounter has non-zero TearDownCallCount: <%v>.",
+				actualCallCount)
+		}
+	}
+
+	return nil
+}
+
+// VerifyDetachCallCount ensures that at least one of the Attachers for this
+// plugin has the expectedDetachCallCount number of calls. Otherwise it returns
+// an error.
+func VerifyDetachCallCount(
+	expectedDetachCallCount int,
+	fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, detacher := range fakeVolumePlugin.GetDetachers() {
+		actualCallCount := detacher.GetDetachCallCount()
+		if actualCallCount == expectedDetachCallCount {
+			return nil
+		}
+	}
+
+	return fmt.Errorf(
+		"No Detachers have expected DetachCallCount. Expected: <%v>.",
+		expectedDetachCallCount)
+}
+
+// VerifyZeroDetachCallCount ensures that all Detachers for this plugin have a
+// zero DetachCallCount. Otherwise it returns an error.
+func VerifyZeroDetachCallCount(fakeVolumePlugin *FakeVolumePlugin) error {
+	for _, detacher := range fakeVolumePlugin.GetDetachers() {
+		actualCallCount := detacher.GetDetachCallCount()
+		if actualCallCount != 0 {
+			return fmt.Errorf(
+				"At least one detacher has non-zero DetachCallCount: <%v>.",
+				actualCallCount)
+		}
+	}
+
+	return nil
+}
+
+// GetTestVolumePluginMgr creates, initializes, and returns a test volume plugin
+// manager and fake volume plugin using a fake volume host.
+func GetTestVolumePluginMgr(
+	t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin) {
+	plugins := ProbeVolumePlugins(VolumeConfig{})
+	volumePluginMgr := NewFakeVolumeHost(
+		"" /* rootDir */, nil /* kubeClient */, plugins, "" /* rootContext */).pluginMgr
+
+	return &volumePluginMgr, plugins[0].(*FakeVolumePlugin)
+}
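Taken together, the new call-count getters and Verify helpers let kubelet tests assert on fake-plugin activity without reaching into its fields. A sketch of how a test might use them; only GetTestVolumePluginMgr and the Verify functions are defined in this diff, while the package name, import alias, and the component being exercised are assumptions for illustration:

    package volumemanager_test // hypothetical test package

    import (
        "testing"

        volumetesting "k8s.io/kubernetes/pkg/volume/testing"
    )

    func TestMountHappensExactlyOnce(t *testing.T) {
        // Build a plugin manager backed by the fake volume host/plugin.
        volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
        _ = volumePluginMgr // wire this into the component under test

        // ... drive the component so that it mounts a single volume ...

        // Assert exactly one SetUp happened and nothing was torn down.
        if err := volumetesting.VerifySetUpCallCount(1 /* expectedSetUpCallCount */, fakePlugin); err != nil {
            t.Errorf("%v", err)
        }
        if err := volumetesting.VerifyZeroTearDownCallCount(fakePlugin); err != nil {
            t.Errorf("%v", err)
        }
    }

The getters take the fake's read lock, so these assertions are safe even while the component under test is still running operations on other goroutines.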
(deleted file: package attachdetach, 71 lines)
@@ -1,71 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package attachdetach contains consts and helper methods used by various
-// attach/detach components in controller and kubelet
-package attachdetach
-
-import (
-	"fmt"
-
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/volume"
-)
-
-const (
-	// ControllerManagedAnnotation is the key of the annotation on Node objects
-	// that indicates attach/detach operations for the node should be managed
-	// by the attach/detach controller
-	ControllerManagedAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
-)
-
-// GetUniqueDeviceName returns a unique name representing the device with the
-// specified deviceName of the pluginName volume type.
-// The returned name can be used to uniquely reference the device. For example,
-// to prevent operations (attach/detach) from being triggered on the same volume
-func GetUniqueDeviceName(
-	pluginName, deviceName string) api.UniqueDeviceName {
-	return api.UniqueDeviceName(fmt.Sprintf("%s/%s", pluginName, deviceName))
-}
-
-// GetUniqueDeviceNameFromSpec uses the given AttachableVolumePlugin to
-// generate a unique name representing the device defined in the specified
-// volume spec.
-// This returned name can be used to uniquely reference the device. For example,
-// to prevent operations (attach/detach) from being triggered on the same volume.
-// If the given plugin does not support the volume spec, this returns an error.
-func GetUniqueDeviceNameFromSpec(
-	attachableVolumePlugin volume.AttachableVolumePlugin,
-	volumeSpec *volume.Spec) (api.UniqueDeviceName, error) {
-	if attachableVolumePlugin == nil {
-		return "", fmt.Errorf(
-			"attachablePlugin should not be nil. volumeSpec.Name=%q",
-			volumeSpec.Name())
-	}
-
-	deviceName, err := attachableVolumePlugin.GetDeviceName(volumeSpec)
-	if err != nil || deviceName == "" {
-		return "", fmt.Errorf(
-			"failed to GetDeviceName from AttachablePlugin for volumeSpec %q err=%v",
-			volumeSpec.Name(),
-			err)
-	}
-
-	return GetUniqueDeviceName(
-		attachableVolumePlugin.Name(),
-		deviceName),
-		nil
-}
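The key format these deleted helpers produced, "<plugin name>/<device name>", is what kept attach/detach operations from being triggered twice on the same volume, and the new volume-manager code tracks volumes under similarly constructed unique names. A trivial, self-contained sketch of the format (local function, not the deleted API):

    package main

    import "fmt"

    // uniqueDeviceName reproduces the "<plugin name>/<device name>" format the
    // deleted helper used to build collision-free keys for attach/detach tracking.
    func uniqueDeviceName(pluginName, deviceName string) string {
        return fmt.Sprintf("%s/%s", pluginName, deviceName)
    }

    func main() {
        fmt.Println(uniqueDeviceName("kubernetes.io/gce-pd", "my-data-disk"))
        // Output: kubernetes.io/gce-pd/my-data-disk
    }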
pkg/volume/util/operationexecutor/operation_executor.go (new file, 807 lines)
@@ -0,0 +1,807 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package operationexecutor implements interfaces that enable execution of
+// attach, detach, mount, and unmount operations with a goroutinemap so that
+// more than one operation is never triggered on the same volume.
+package operationexecutor
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/golang/glog"
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util/goroutinemap"
+	"k8s.io/kubernetes/pkg/volume"
+	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
+)
+
+// OperationExecutor defines a set of operations for attaching, detaching,
+// mounting, or unmounting a volume that are executed with a goroutinemap which
+// prevents more than one operation from being triggered on the same volume.
+//
+// These operations should be idempotent (for example, AttachVolume should
+// still succeed if the volume is already attached to the node, etc.). However,
+// they depend on the volume plugins to implement this behavior.
+//
+// Once an operation completes successfully, the actualStateOfWorld is updated
+// to indicate the volume is attached/detached/mounted/unmounted.
+//
+// If the OperationExecutor fails to start the operation because, for example,
+// an operation with the same UniqueVolumeName is already pending, a non-nil
+// error is returned.
+//
+// Once the operation is started, since it is executed asynchronously,
+// errors are simply logged and the goroutine is terminated without updating
+// actualStateOfWorld (callers are responsible for retrying as needed).
+type OperationExecutor interface {
+	// AttachVolume attaches the volume to the node specified in volumeToAttach.
+	// It then updates the actual state of the world to reflect that.
+	AttachVolume(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
+
+	// DetachVolume detaches the volume from the node specified in
+	// volumeToDetach, and updates the actual state of the world to reflect
+	// that.
+	DetachVolume(volumeToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
+
+	// MountVolume mounts the volume to the pod specified in volumeToMount.
+	// Specifically it will:
+	// * Wait for the device to finish attaching (for attachable volumes only).
+	// * Mount device to global mount path (for attachable volumes only).
+	// * Update actual state of world to reflect volume is globally mounted (for
+	//   attachable volumes only).
+	// * Mount the volume to the pod specific path.
+	// * Update actual state of world to reflect volume is mounted to the pod
+	//   path.
+	MountVolume(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorld ActualStateOfWorldMounterUpdater) error
+
+	// UnmountVolume unmounts the volume from the pod specified in
+	// volumeToUnmount and updates the actual state of the world to reflect that.
+	UnmountVolume(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error
+
+	// UnmountDevice unmounts the volume's global mount path from the device
+	// (for attachable volumes only), freeing it for detach. It then updates the
+	// actual state of the world to reflect that.
+	UnmountDevice(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater) error
+}
+
+// NewOperationExecutor returns a new instance of OperationExecutor.
+func NewOperationExecutor(
+	volumePluginMgr *volume.VolumePluginMgr) OperationExecutor {
+	return &operationExecutor{
+		volumePluginMgr:   volumePluginMgr,
+		pendingOperations: goroutinemap.NewGoRoutineMap(),
+	}
+}
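NewOperationExecutor seeds the executor with a GoRoutineMap, which is what enforces the one-operation-per-volume rule described in the type comment: starting an operation fails fast if one is already pending, and completion is reported asynchronously through the actual-state-of-world updaters. A toy, self-contained model of just that mutual-exclusion guarantee; the real implementation is k8s.io/kubernetes/pkg/util/goroutinemap, and everything below is a simplified stand-in:

    package main

    import (
        "fmt"
        "sync"
    )

    // goroutineMap allows at most one in-flight operation per volume name.
    type goroutineMap struct {
        mu      sync.Mutex
        pending map[string]bool
    }

    func newGoroutineMap() *goroutineMap {
        return &goroutineMap{pending: map[string]bool{}}
    }

    // Run starts op for volumeName unless one is already pending, in which
    // case it returns an error and the caller retries on the next sync loop.
    func (gm *goroutineMap) Run(volumeName string, op func()) error {
        gm.mu.Lock()
        if gm.pending[volumeName] {
            gm.mu.Unlock()
            return fmt.Errorf("operation for %q is already pending", volumeName)
        }
        gm.pending[volumeName] = true
        gm.mu.Unlock()

        go func() {
            // Clear the pending flag when the operation finishes, whether
            // or not it succeeded; retries are the caller's job.
            defer func() {
                gm.mu.Lock()
                delete(gm.pending, volumeName)
                gm.mu.Unlock()
            }()
            op()
        }()
        return nil
    }

    func main() {
        gm := newGoroutineMap()
        done := make(chan struct{})
        _ = gm.Run("vol-1", func() { <-done }) // first operation starts
        err := gm.Run("vol-1", func() {})      // second is rejected while pending
        fmt.Println(err)
        close(done)
    }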
|
|
||||||
|
// ActualStateOfWorldMounterUpdater defines a set of operations updating the actual
|
||||||
|
// state of the world cache after successful mount/unmount.
|
||||||
|
type ActualStateOfWorldMounterUpdater interface {
|
||||||
|
// Marks the specified volume as mounted to the specified pod
|
||||||
|
MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName api.UniqueVolumeName, mounter volume.Mounter, outerVolumeSpecName string, volumeGidValue string) error
|
||||||
|
|
||||||
|
// Marks the specified volume as unmounted from the specified pod
|
||||||
|
MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName api.UniqueVolumeName) error
|
||||||
|
|
||||||
|
// Marks the specified volume as having been globally mounted.
|
||||||
|
MarkDeviceAsMounted(volumeName api.UniqueVolumeName) error
|
||||||
|
|
||||||
|
// Marks the specified volume as having its global mount unmounted.
|
||||||
|
MarkDeviceAsUnmounted(volumeName api.UniqueVolumeName) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ActualStateOfWorldAttacherUpdater defines a set of operations updating the
|
||||||
|
// actual state of the world cache after successful attach/detach/mount/unmount.
|
||||||
|
type ActualStateOfWorldAttacherUpdater interface {
|
||||||
|
// Marks the specified volume as attached to the specified node
|
||||||
|
MarkVolumeAsAttached(volumeSpec *volume.Spec, nodeName string) error
|
||||||
|
|
||||||
|
// Marks the specified volume as detached from the specified node
|
||||||
|
MarkVolumeAsDetached(volumeName api.UniqueVolumeName, nodeName string)
|
||||||
|
}
|
||||||
|
|
||||||
|
// VolumeToAttach represents a volume that should be attached to a node.
|
||||||
|
type VolumeToAttach struct {
|
||||||
|
// VolumeName is the unique identifier for the volume that should be
|
||||||
|
// attached.
|
||||||
|
VolumeName api.UniqueVolumeName
|
||||||
|
|
||||||
|
// VolumeSpec is a volume spec containing the specification for the volume
|
||||||
|
// that should be attached.
|
||||||
|
VolumeSpec *volume.Spec
|
||||||
|
|
||||||
|
// NodeName is the identifier for the node that the volume should be
|
||||||
|
// attached to.
|
||||||
|
NodeName string
|
||||||
|
}

// VolumeToMount represents a volume that should be attached to this node and
// mounted to the PodName.
type VolumeToMount struct {
    // VolumeName is the unique identifier for the volume that should be
    // mounted.
    VolumeName api.UniqueVolumeName

    // PodName is the unique identifier for the pod that the volume should be
    // mounted to after it is attached.
    PodName volumetypes.UniquePodName

    // VolumeSpec is a volume spec containing the specification for the volume
    // that should be mounted. Used to create NewMounter. Used to generate
    // InnerVolumeSpecName.
    VolumeSpec *volume.Spec

    // OuterVolumeSpecName is the podSpec.Volume[x].Name of the volume. If the
    // volume was referenced through a persistent volume claim, this contains
    // the podSpec.Volume[x].Name of the persistent volume claim.
    OuterVolumeSpecName string

    // Pod to mount the volume to. Used to create NewMounter.
    Pod *api.Pod

    // PluginIsAttachable indicates that the plugin for this volume implements
    // the volume.Attacher interface
    PluginIsAttachable bool

    // VolumeGidValue contains the value of the GID annotation, if present.
    VolumeGidValue string
}

// AttachedVolume represents a volume that is attached to a node.
type AttachedVolume struct {
    // VolumeName is the unique identifier for the volume that is attached.
    VolumeName api.UniqueVolumeName

    // VolumeSpec is the volume spec containing the specification for the
    // volume that is attached.
    VolumeSpec *volume.Spec

    // NodeName is the identifier for the node that the volume is attached to.
    NodeName string

    // PluginIsAttachable indicates that the plugin for this volume implements
    // the volume.Attacher interface
    PluginIsAttachable bool
}

// MountedVolume represents a volume that has successfully been mounted to a pod.
type MountedVolume struct {
    // PodName is the unique identifier of the pod mounted to.
    PodName volumetypes.UniquePodName

    // VolumeName is the unique identifier of the volume mounted to the pod.
    VolumeName api.UniqueVolumeName

    // InnerVolumeSpecName is the volume.Spec.Name() of the volume. If the
    // volume was referenced through a persistent volume claim, this contains
    // the name of the bound persistent volume object.
    // It is the name that plugins use in their pod mount path, i.e.
    // /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{innerVolumeSpecName}/
    // PVC example:
    //   apiVersion: v1
    //   kind: PersistentVolume
    //   metadata:
    //     name: pv0003                       <- InnerVolumeSpecName
    //   spec:
    //     capacity:
    //       storage: 5Gi
    //     accessModes:
    //       - ReadWriteOnce
    //     persistentVolumeReclaimPolicy: Recycle
    //     nfs:
    //       path: /tmp
    //       server: 172.17.0.2
    // Non-PVC example:
    //   apiVersion: v1
    //   kind: Pod
    //   metadata:
    //     name: test-pd
    //   spec:
    //     containers:
    //     - image: gcr.io/google_containers/test-webserver
    //       name: test-container
    //       volumeMounts:
    //       - mountPath: /test-pd
    //         name: test-volume
    //     volumes:
    //     - name: test-volume                <- InnerVolumeSpecName
    //       gcePersistentDisk:
    //         pdName: my-data-disk
    //         fsType: ext4
    InnerVolumeSpecName string

    // OuterVolumeSpecName is the podSpec.Volume[x].Name of the volume. If the
    // volume was referenced through a persistent volume claim, this contains
    // the podSpec.Volume[x].Name of the persistent volume claim.
    // PVC example:
    //   kind: Pod
    //   apiVersion: v1
    //   metadata:
    //     name: mypod
    //   spec:
    //     containers:
    //     - name: myfrontend
    //       image: dockerfile/nginx
    //       volumeMounts:
    //       - mountPath: "/var/www/html"
    //         name: mypd
    //     volumes:
    //     - name: mypd                       <- OuterVolumeSpecName
    //       persistentVolumeClaim:
    //         claimName: myclaim
    // Non-PVC example:
    //   apiVersion: v1
    //   kind: Pod
    //   metadata:
    //     name: test-pd
    //   spec:
    //     containers:
    //     - image: gcr.io/google_containers/test-webserver
    //       name: test-container
    //       volumeMounts:
    //       - mountPath: /test-pd
    //         name: test-volume
    //     volumes:
    //     - name: test-volume                <- OuterVolumeSpecName
    //       gcePersistentDisk:
    //         pdName: my-data-disk
    //         fsType: ext4
    OuterVolumeSpecName string

    // PluginName is the "Unescaped Qualified" name of the volume plugin used to
    // mount and unmount this volume. It can be used to fetch the volume plugin
    // to unmount with, on demand. It is also the name that plugins use, though
    // escaped, in their pod mount path, i.e.
    // /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{outerVolumeSpecName}/
    PluginName string

    // PodUID is the UID of the pod mounted to. It is also the string used by
    // plugins in their pod mount path, i.e.
    // /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{outerVolumeSpecName}/
    PodUID types.UID

    // Mounter is the volume mounter used to mount this volume. It is required
    // by kubelet to create container.VolumeMap.
    Mounter volume.Mounter

    // VolumeGidValue contains the value of the GID annotation, if present.
    VolumeGidValue string
}
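
The path layout referenced throughout these comments can be composed mechanically. A minimal sketch (the "/" to "~" escaping mirrors kubelet's qualified-name escaping; the helper itself is illustrative, not part of this file):

// Sketch: build the per-pod mount path documented above.
func podVolumeDir(podUID types.UID, pluginName, innerVolumeSpecName string) string {
    // e.g. "kubernetes.io/gce-pd" -> "kubernetes.io~gce-pd"
    escapedPlugin := strings.Replace(pluginName, "/", "~", -1)
    return path.Join(
        "/var/lib/kubelet/pods", string(podUID),
        "volumes", escapedPlugin, innerVolumeSpecName)
}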

type operationExecutor struct {
    // volumePluginMgr is the volume plugin manager used to create volume
    // plugin objects.
    volumePluginMgr *volume.VolumePluginMgr

    // pendingOperations keeps track of pending attach and detach operations so
    // multiple operations are not started on the same volume
    pendingOperations goroutinemap.GoRoutineMap
}
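
pendingOperations is what enforces the one-operation-per-volume guarantee: GoRoutineMap.Run launches the supplied function in a new goroutine keyed by the given name and returns an error immediately if an operation with that key is still pending. A minimal usage sketch:

gm := goroutinemap.NewGoRoutineMap()
err1 := gm.Run("volume-a", func() error { /* long-running attach */ return nil })
err2 := gm.Run("volume-a", func() error { return nil })
// err1 is nil; err2 is non-nil while the first operation is still running,
// so callers simply retry on a later reconciliation pass.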

func (oe *operationExecutor) AttachVolume(
    volumeToAttach VolumeToAttach,
    actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
    attachFunc, err :=
        oe.generateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
    if err != nil {
        return err
    }

    return oe.pendingOperations.Run(
        string(volumeToAttach.VolumeName), attachFunc)
}

func (oe *operationExecutor) DetachVolume(
    volumeToDetach AttachedVolume,
    actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
    detachFunc, err :=
        oe.generateDetachVolumeFunc(volumeToDetach, actualStateOfWorld)
    if err != nil {
        return err
    }

    return oe.pendingOperations.Run(
        string(volumeToDetach.VolumeName), detachFunc)
}

func (oe *operationExecutor) MountVolume(
    waitForAttachTimeout time.Duration,
    volumeToMount VolumeToMount,
    actualStateOfWorld ActualStateOfWorldMounterUpdater) error {
    mountFunc, err := oe.generateMountVolumeFunc(
        waitForAttachTimeout, volumeToMount, actualStateOfWorld)
    if err != nil {
        return err
    }

    return oe.pendingOperations.Run(
        string(volumeToMount.VolumeName), mountFunc)
}

func (oe *operationExecutor) UnmountVolume(
    volumeToUnmount MountedVolume,
    actualStateOfWorld ActualStateOfWorldMounterUpdater) error {
    unmountFunc, err :=
        oe.generateUnmountVolumeFunc(volumeToUnmount, actualStateOfWorld)
    if err != nil {
        return err
    }

    return oe.pendingOperations.Run(
        string(volumeToUnmount.VolumeName), unmountFunc)
}

func (oe *operationExecutor) UnmountDevice(
    deviceToDetach AttachedVolume,
    actualStateOfWorld ActualStateOfWorldMounterUpdater) error {
    unmountDeviceFunc, err :=
        oe.generateUnmountDeviceFunc(deviceToDetach, actualStateOfWorld)
    if err != nil {
        return err
    }

    return oe.pendingOperations.Run(
        string(deviceToDetach.VolumeName), unmountDeviceFunc)
}
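
Together these wrappers give callers fire-and-forget semantics: generation errors surface immediately, while execution happens asynchronously under the per-volume lock. A hypothetical caller loop (the variable names are illustrative, not from this file):

for _, volumeToMount := range volumesToMount {
    err := oe.MountVolume(waitForAttachTimeout, volumeToMount, actualStateOfWorld)
    if err != nil {
        // Either an operation for this volume is already pending or the
        // mount function could not be generated; retry on the next pass.
        glog.V(5).Infof("MountVolume for %q not started: %v", volumeToMount.VolumeName, err)
    }
}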

func (oe *operationExecutor) generateAttachVolumeFunc(
    volumeToAttach VolumeToAttach,
    actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) {
    // Get attacher plugin
    attachableVolumePlugin, err :=
        oe.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec)
    if err != nil || attachableVolumePlugin == nil {
        return nil, fmt.Errorf(
            "AttachVolume.FindAttachablePluginBySpec failed for volume %q (spec.Name: %q) from node %q with: %v",
            volumeToAttach.VolumeName,
            volumeToAttach.VolumeSpec.Name(),
            volumeToAttach.NodeName,
            err)
    }

    volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
    if newAttacherErr != nil {
        return nil, fmt.Errorf(
            "AttachVolume.NewAttacher failed for volume %q (spec.Name: %q) from node %q with: %v",
            volumeToAttach.VolumeName,
            volumeToAttach.VolumeSpec.Name(),
            volumeToAttach.NodeName,
            newAttacherErr)
    }

    return func() error {
        // Execute attach
        attachErr := volumeAttacher.Attach(
            volumeToAttach.VolumeSpec, volumeToAttach.NodeName)

        if attachErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "AttachVolume.Attach failed for volume %q (spec.Name: %q) from node %q with: %v",
                volumeToAttach.VolumeName,
                volumeToAttach.VolumeSpec.Name(),
                volumeToAttach.NodeName,
                attachErr)
            return attachErr
        }

        glog.Infof(
            "AttachVolume.Attach succeeded for volume %q (spec.Name: %q) from node %q.",
            volumeToAttach.VolumeName,
            volumeToAttach.VolumeSpec.Name(),
            volumeToAttach.NodeName)

        // Update actual state of world
        addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
            volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
        if addVolumeNodeErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "AttachVolume.MarkVolumeAsAttached failed for volume %q (spec.Name: %q) from node %q with: %v.",
                volumeToAttach.VolumeName,
                volumeToAttach.VolumeSpec.Name(),
                volumeToAttach.NodeName,
                addVolumeNodeErr)
            return addVolumeNodeErr
        }

        return nil
    }, nil
}

func (oe *operationExecutor) generateDetachVolumeFunc(
    volumeToDetach AttachedVolume,
    actualStateOfWorld ActualStateOfWorldAttacherUpdater) (func() error, error) {
    // Get attacher plugin
    attachableVolumePlugin, err :=
        oe.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec)
    if err != nil || attachableVolumePlugin == nil {
        return nil, fmt.Errorf(
            "DetachVolume.FindAttachablePluginBySpec failed for volume %q (spec.Name: %q) from node %q with: %v",
            volumeToDetach.VolumeName,
            volumeToDetach.VolumeSpec.Name(),
            volumeToDetach.NodeName,
            err)
    }

    volumeName, err :=
        attachableVolumePlugin.GetVolumeName(volumeToDetach.VolumeSpec)
    if err != nil {
        return nil, fmt.Errorf(
            "DetachVolume.GetVolumeName failed for volume %q (spec.Name: %q) from node %q with: %v",
            volumeToDetach.VolumeName,
            volumeToDetach.VolumeSpec.Name(),
            volumeToDetach.NodeName,
            err)
    }

    volumeDetacher, err := attachableVolumePlugin.NewDetacher()
    if err != nil {
        return nil, fmt.Errorf(
            "DetachVolume.NewDetacher failed for volume %q (spec.Name: %q) from node %q with: %v",
            volumeToDetach.VolumeName,
            volumeToDetach.VolumeSpec.Name(),
            volumeToDetach.NodeName,
            err)
    }

    return func() error {
        // Execute detach
        detachErr := volumeDetacher.Detach(volumeName, volumeToDetach.NodeName)
        if detachErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "DetachVolume.Detach failed for volume %q (spec.Name: %q) from node %q with: %v",
                volumeToDetach.VolumeName,
                volumeToDetach.VolumeSpec.Name(),
                volumeToDetach.NodeName,
                detachErr)
            return detachErr
        }

        glog.Infof(
            "DetachVolume.Detach succeeded for volume %q (spec.Name: %q) from node %q.",
            volumeToDetach.VolumeName,
            volumeToDetach.VolumeSpec.Name(),
            volumeToDetach.NodeName)

        // Update actual state of world
        actualStateOfWorld.MarkVolumeAsDetached(
            volumeToDetach.VolumeName, volumeToDetach.NodeName)

        return nil
    }, nil
}

func (oe *operationExecutor) generateMountVolumeFunc(
    waitForAttachTimeout time.Duration,
    volumeToMount VolumeToMount,
    actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) {
    // Get mounter plugin
    volumePlugin, err :=
        oe.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
    if err != nil || volumePlugin == nil {
        return nil, fmt.Errorf(
            "MountVolume.FindPluginBySpec failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
            volumeToMount.VolumeName,
            volumeToMount.VolumeSpec.Name(),
            volumeToMount.PodName,
            volumeToMount.Pod.UID,
            err)
    }

    volumeMounter, newMounterErr := volumePlugin.NewMounter(
        volumeToMount.VolumeSpec,
        volumeToMount.Pod,
        volume.VolumeOptions{})
    if newMounterErr != nil {
        return nil, fmt.Errorf(
            "MountVolume.NewMounter failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
            volumeToMount.VolumeName,
            volumeToMount.VolumeSpec.Name(),
            volumeToMount.PodName,
            volumeToMount.Pod.UID,
            newMounterErr)
    }

    // Get attacher, if possible
    attachableVolumePlugin, _ :=
        oe.volumePluginMgr.FindAttachablePluginBySpec(volumeToMount.VolumeSpec)
    var volumeAttacher volume.Attacher
    if attachableVolumePlugin != nil {
        volumeAttacher, _ = attachableVolumePlugin.NewAttacher()
    }

    var fsGroup *int64
    if volumeToMount.Pod.Spec.SecurityContext != nil &&
        volumeToMount.Pod.Spec.SecurityContext.FSGroup != nil {
        fsGroup = volumeToMount.Pod.Spec.SecurityContext.FSGroup
    }

    return func() error {
        if volumeAttacher != nil {
            // Wait for attachable volumes to finish attaching
            glog.Infof(
                "Entering MountVolume.WaitForAttach for volume %q (spec.Name: %q) pod %q (UID: %q).",
                volumeToMount.VolumeName,
                volumeToMount.VolumeSpec.Name(),
                volumeToMount.PodName,
                volumeToMount.Pod.UID)

            devicePath, err := volumeAttacher.WaitForAttach(
                volumeToMount.VolumeSpec, waitForAttachTimeout)
            if err != nil {
                glog.Errorf(
                    "MountVolume.WaitForAttach failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                    volumeToMount.VolumeName,
                    volumeToMount.VolumeSpec.Name(),
                    volumeToMount.PodName,
                    volumeToMount.Pod.UID,
                    err)
                return err
            }

            glog.Infof(
                "MountVolume.WaitForAttach succeeded for volume %q (spec.Name: %q) pod %q (UID: %q).",
                volumeToMount.VolumeName,
                volumeToMount.VolumeSpec.Name(),
                volumeToMount.PodName,
                volumeToMount.Pod.UID)

            deviceMountPath, err :=
                volumeAttacher.GetDeviceMountPath(volumeToMount.VolumeSpec)
            if err != nil {
                glog.Errorf(
                    "MountVolume.GetDeviceMountPath failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                    volumeToMount.VolumeName,
                    volumeToMount.VolumeSpec.Name(),
                    volumeToMount.PodName,
                    volumeToMount.Pod.UID,
                    err)
                return err
            }

            // Mount device to global mount path
            err = volumeAttacher.MountDevice(
                volumeToMount.VolumeSpec,
                devicePath,
                deviceMountPath)
            if err != nil {
                glog.Errorf(
                    "MountVolume.MountDevice failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                    volumeToMount.VolumeName,
                    volumeToMount.VolumeSpec.Name(),
                    volumeToMount.PodName,
                    volumeToMount.Pod.UID,
                    err)
                return err
            }

            glog.Infof(
                "MountVolume.MountDevice succeeded for volume %q (spec.Name: %q) pod %q (UID: %q).",
                volumeToMount.VolumeName,
                volumeToMount.VolumeSpec.Name(),
                volumeToMount.PodName,
                volumeToMount.Pod.UID)

            // Update actual state of world to reflect volume is globally mounted
            markDeviceMountedErr := actualStateOfWorld.MarkDeviceAsMounted(
                volumeToMount.VolumeName)
            if markDeviceMountedErr != nil {
                // On failure, just log and exit. The controller will retry
                glog.Errorf(
                    "MountVolume.MarkDeviceAsMounted failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                    volumeToMount.VolumeName,
                    volumeToMount.VolumeSpec.Name(),
                    volumeToMount.PodName,
                    volumeToMount.Pod.UID,
                    markDeviceMountedErr)
                return markDeviceMountedErr
            }
        }

        // Execute mount
        mountErr := volumeMounter.SetUp(fsGroup)
        if mountErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "MountVolume.SetUp failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                volumeToMount.VolumeName,
                volumeToMount.VolumeSpec.Name(),
                volumeToMount.PodName,
                volumeToMount.Pod.UID,
                mountErr)
            return mountErr
        }

        glog.Infof(
            "MountVolume.SetUp succeeded for volume %q (spec.Name: %q) pod %q (UID: %q).",
            volumeToMount.VolumeName,
            volumeToMount.VolumeSpec.Name(),
            volumeToMount.PodName,
            volumeToMount.Pod.UID)

        // Update actual state of world
        markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted(
            volumeToMount.PodName,
            volumeToMount.Pod.UID,
            volumeToMount.VolumeName,
            volumeMounter,
            volumeToMount.OuterVolumeSpecName,
            volumeToMount.VolumeGidValue)
        if markVolMountedErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "MountVolume.MarkVolumeAsMounted failed for volume %q (spec.Name: %q) pod %q (UID: %q) with: %v",
                volumeToMount.VolumeName,
                volumeToMount.VolumeSpec.Name(),
                volumeToMount.PodName,
                volumeToMount.Pod.UID,
                markVolMountedErr)
            return markVolMountedErr
        }

        return nil
    }, nil
}

func (oe *operationExecutor) generateUnmountVolumeFunc(
    volumeToUnmount MountedVolume,
    actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) {
    // Get mountable plugin
    volumePlugin, err :=
        oe.volumePluginMgr.FindPluginByName(volumeToUnmount.PluginName)
    if err != nil || volumePlugin == nil {
        return nil, fmt.Errorf(
            "UnmountVolume.FindPluginByName failed for volume %q (volume.spec.Name: %q) pod %q (UID: %q) err=%v",
            volumeToUnmount.VolumeName,
            volumeToUnmount.OuterVolumeSpecName,
            volumeToUnmount.PodName,
            volumeToUnmount.PodUID,
            err)
    }

    volumeUnmounter, newUnmounterErr := volumePlugin.NewUnmounter(
        volumeToUnmount.InnerVolumeSpecName, volumeToUnmount.PodUID)
    if newUnmounterErr != nil {
        return nil, fmt.Errorf(
            "UnmountVolume.NewUnmounter failed for volume %q (volume.spec.Name: %q) pod %q (UID: %q) err=%v",
            volumeToUnmount.VolumeName,
            volumeToUnmount.OuterVolumeSpecName,
            volumeToUnmount.PodName,
            volumeToUnmount.PodUID,
            newUnmounterErr)
    }

    return func() error {
        // Execute unmount
        unmountErr := volumeUnmounter.TearDown()
        if unmountErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "UnmountVolume.TearDown failed for volume %q (volume.spec.Name: %q) pod %q (UID: %q) with: %v",
                volumeToUnmount.VolumeName,
                volumeToUnmount.OuterVolumeSpecName,
                volumeToUnmount.PodName,
                volumeToUnmount.PodUID,
                unmountErr)
            return unmountErr
        }

        glog.Infof(
            "UnmountVolume.TearDown succeeded for volume %q (volume.spec.Name: %q) pod %q (UID: %q).",
            volumeToUnmount.VolumeName,
            volumeToUnmount.OuterVolumeSpecName,
            volumeToUnmount.PodName,
            volumeToUnmount.PodUID)

        // Update actual state of world
        markVolMountedErr := actualStateOfWorld.MarkVolumeAsUnmounted(
            volumeToUnmount.PodName, volumeToUnmount.VolumeName)
        if markVolMountedErr != nil {
            // On failure, just log and exit
            glog.Errorf(
                "UnmountVolume.MarkVolumeAsUnmounted failed for volume %q (volume.spec.Name: %q) pod %q (UID: %q) with: %v",
                volumeToUnmount.VolumeName,
                volumeToUnmount.OuterVolumeSpecName,
                volumeToUnmount.PodName,
                volumeToUnmount.PodUID,
                markVolMountedErr)
        }

        return nil
    }, nil
}

func (oe *operationExecutor) generateUnmountDeviceFunc(
    deviceToDetach AttachedVolume,
    actualStateOfWorld ActualStateOfWorldMounterUpdater) (func() error, error) {
    // Get attacher plugin
    attachableVolumePlugin, err :=
        oe.volumePluginMgr.FindAttachablePluginBySpec(deviceToDetach.VolumeSpec)
    if err != nil || attachableVolumePlugin == nil {
        return nil, fmt.Errorf(
            "UnmountDevice.FindAttachablePluginBySpec failed for volume %q (spec.Name: %q) with: %v",
            deviceToDetach.VolumeName,
            deviceToDetach.VolumeSpec.Name(),
            err)
    }

    volumeDetacher, err := attachableVolumePlugin.NewDetacher()
    if err != nil {
        return nil, fmt.Errorf(
            "UnmountDevice.NewDetacher failed for volume %q (spec.Name: %q) with: %v",
            deviceToDetach.VolumeName,
            deviceToDetach.VolumeSpec.Name(),
            err)
    }

    volumeAttacher, err := attachableVolumePlugin.NewAttacher()
    if err != nil {
        return nil, fmt.Errorf(
            "UnmountDevice.NewAttacher failed for volume %q (spec.Name: %q) with: %v",
            deviceToDetach.VolumeName,
            deviceToDetach.VolumeSpec.Name(),
            err)
    }

    return func() error {
        deviceMountPath, err :=
            volumeAttacher.GetDeviceMountPath(deviceToDetach.VolumeSpec)
        if err != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "GetDeviceMountPath failed for volume %q (spec.Name: %q) with: %v",
                deviceToDetach.VolumeName,
                deviceToDetach.VolumeSpec.Name(),
                err)
            return err
        }

        // Execute unmount
        unmountDeviceErr := volumeDetacher.UnmountDevice(deviceMountPath)
        if unmountDeviceErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "UnmountDevice failed for volume %q (spec.Name: %q) with: %v",
                deviceToDetach.VolumeName,
                deviceToDetach.VolumeSpec.Name(),
                unmountDeviceErr)
            return unmountDeviceErr
        }

        glog.Infof(
            "UnmountDevice succeeded for volume %q (spec.Name: %q).",
            deviceToDetach.VolumeName,
            deviceToDetach.VolumeSpec.Name())

        // Update actual state of world
        markDeviceUnmountedErr := actualStateOfWorld.MarkDeviceAsUnmounted(
            deviceToDetach.VolumeName)
        if markDeviceUnmountedErr != nil {
            // On failure, just log and exit. The controller will retry
            glog.Errorf(
                "MarkDeviceAsUnmounted failed for device %q (spec.Name: %q) with: %v",
                deviceToDetach.VolumeName,
                deviceToDetach.VolumeSpec.Name(),
                markDeviceUnmountedErr)
            return markDeviceUnmountedErr
        }

        return nil
    }, nil
}

pkg/volume/util/types/types.go (new file)
@@ -0,0 +1,23 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package types defines types used only by volume components
package types

import "k8s.io/kubernetes/pkg/types"

// UniquePodName defines the type to key pods off of
type UniquePodName types.UID
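
Making UniquePodName a distinct type (rather than passing types.UID around) lets the compiler catch accidental mixing of pod and volume identifiers. A contrived sketch:

mountedPods := map[volumetypes.UniquePodName]bool{}
attachedVolumes := map[api.UniqueVolumeName]bool{}

podName := volumetypes.UniquePodName(pod.UID)
mountedPods[podName] = true
// attachedVolumes[podName] = true // would not compile: wrong key type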

@@ -23,23 +23,32 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/volume"
+    "k8s.io/kubernetes/pkg/volume/util/types"
 )
 
 const (
-    // ControllerManagedAnnotation is the key of the annotation on Node objects
-    // that indicates attach/detach operations for the node should be managed
-    // by the attach/detach controller
-    ControllerManagedAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
+    // ControllerManagedAttachAnnotation is the key of the annotation on Node
+    // objects that indicates attach/detach operations for the node should be
+    // managed by the attach/detach controller
+    ControllerManagedAttachAnnotation string = "volumes.kubernetes.io/controller-managed-attach-detach"
+
+    // VolumeGidAnnotationKey is the key of the annotation on the PersistentVolume
+    // object that specifies a supplemental GID.
+    VolumeGidAnnotationKey = "pv.beta.kubernetes.io/gid"
 )
 
+// GetUniquePodName returns a unique identifier to reference a pod by
+func GetUniquePodName(pod *api.Pod) types.UniquePodName {
+    return types.UniquePodName(pod.UID)
+}
+
 // GetUniqueVolumeName returns a unique name representing the volume/plugin.
 // Caller should ensure that volumeName is a name/ID uniquely identifying the
 // actual backing device, directory, path, etc. for a particular volume.
 // The returned name can be used to uniquely reference the volume, for example,
 // to prevent operations (attach/detach or mount/unmount) from being triggered
 // on the same volume.
-func GetUniqueVolumeName(
-    pluginName string, volumeName string) api.UniqueVolumeName {
+func GetUniqueVolumeName(pluginName, volumeName string) api.UniqueVolumeName {
     return api.UniqueVolumeName(fmt.Sprintf("%s/%s", pluginName, volumeName))
 }
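
For example, a disk named my-data-disk handled by the kubernetes.io/gce-pd plugin yields the unique name below, which is also the form that appears in the node's volumesInUse status (values are illustrative):

name := volumehelper.GetUniqueVolumeName("kubernetes.io/gce-pd", "my-data-disk")
// name == api.UniqueVolumeName("kubernetes.io/gce-pd/my-data-disk")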

@@ -24,7 +24,6 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/resource"
-    "k8s.io/kubernetes/pkg/util/mount"
 )
 
 // Volume represents a directory used by pods or hosts on a node. All method
@@ -147,11 +146,11 @@ type Attacher interface {
     // GetDeviceMountPath returns a path where the device should
     // be mounted after it is attached. This is a global mount
     // point which should be bind mounted for individual volumes.
-    GetDeviceMountPath(spec *Spec) string
+    GetDeviceMountPath(spec *Spec) (string, error)
 
     // MountDevice mounts the disk to a global path which
     // individual pods can then bind mount
-    MountDevice(spec *Spec, devicePath string, deviceMountPath string, mounter mount.Interface) error
+    MountDevice(spec *Spec, devicePath string, deviceMountPath string) error
 }
 
 // Detacher can detach a volume from a node.
@@ -167,7 +166,7 @@ type Detacher interface {
     // UnmountDevice unmounts the global mount of the disk. This
     // should only be called once all bind mounts have been
     // unmounted.
-    UnmountDevice(deviceMountPath string, mounter mount.Interface) error
+    UnmountDevice(deviceMountPath string) error
 }
 
 func RenameDirectory(oldPath, newName string) (string, error) {
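
With the mount.Interface parameters removed, plugins now obtain their mounter from the plugin host instead of receiving it from the caller. A skeletal Attacher conforming to the new signatures might look like this (the type name, plugin dir name, and fstype are placeholders, not part of this commit):

type sketchAttacher struct {
    host VolumeHost
}

func (a *sketchAttacher) GetDeviceMountPath(spec *Spec) (string, error) {
    if spec.Volume == nil && spec.PersistentVolume == nil {
        return "", fmt.Errorf("spec does not reference a supported volume type")
    }
    return path.Join(a.host.GetPluginDir("sketch-plugin"), "mounts", spec.Name()), nil
}

func (a *sketchAttacher) MountDevice(spec *Spec, devicePath string, deviceMountPath string) error {
    // The mounter now comes from the host rather than from a parameter.
    return a.host.GetMounter().Mount(devicePath, deviceMountPath, "ext4", nil)
}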

@@ -62,9 +62,9 @@ func (plugin *vsphereVolumePlugin) GetPluginName() string {
 }
 
 func (plugin *vsphereVolumePlugin) GetVolumeName(spec *volume.Spec) (string, error) {
-    volumeSource, _ := getVolumeSource(spec)
-    if volumeSource == nil {
-        return "", fmt.Errorf("Spec does not reference a VSphere volume type")
+    volumeSource, _, err := getVolumeSource(spec)
+    if err != nil {
+        return "", err
     }
 
     return volumeSource.VolumePath, nil
@@ -75,6 +75,10 @@ func (plugin *vsphereVolumePlugin) CanSupport(spec *volume.Spec) bool {
         (spec.Volume != nil && spec.Volume.VsphereVolume != nil)
 }
 
+func (plugin *vsphereVolumePlugin) RequiresRemount() bool {
+    return false
+}
+
 func (plugin *vsphereVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
     return plugin.newMounterInternal(spec, pod.UID, &VsphereDiskUtil{}, plugin.host.GetMounter())
 }
@@ -84,11 +88,9 @@ func (plugin *vsphereVolumePlugin) NewUnmounter(volName string, podUID types.UID
 }
 
 func (plugin *vsphereVolumePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager vdManager, mounter mount.Interface) (volume.Mounter, error) {
-    var vvol *api.VsphereVirtualDiskVolumeSource
-    if spec.Volume != nil && spec.Volume.VsphereVolume != nil {
-        vvol = spec.Volume.VsphereVolume
-    } else {
-        vvol = spec.PersistentVolume.Spec.VsphereVolume
+    vvol, _, err := getVolumeSource(spec)
+    if err != nil {
+        return nil, err
     }
 
     volPath := vvol.VolumePath
@@ -427,17 +429,14 @@ func (v *vsphereVolumeProvisioner) Provision() (*api.PersistentVolume, error) {
     return pv, nil
 }
 
-func getVolumeSource(spec *volume.Spec) (*api.VsphereVirtualDiskVolumeSource, bool) {
-    var readOnly bool
-    var volumeSource *api.VsphereVirtualDiskVolumeSource
-
+func getVolumeSource(
+    spec *volume.Spec) (*api.VsphereVirtualDiskVolumeSource, bool, error) {
     if spec.Volume != nil && spec.Volume.VsphereVolume != nil {
-        volumeSource = spec.Volume.VsphereVolume
-        readOnly = spec.ReadOnly
-    } else {
-        volumeSource = spec.PersistentVolume.Spec.VsphereVolume
-        readOnly = spec.ReadOnly
+        return spec.Volume.VsphereVolume, spec.ReadOnly, nil
+    } else if spec.PersistentVolume != nil &&
+        spec.PersistentVolume.Spec.VsphereVolume != nil {
+        return spec.PersistentVolume.Spec.VsphereVolume, spec.ReadOnly, nil
     }
 
-    return volumeSource, readOnly
+    return nil, false, fmt.Errorf("Spec does not reference a VSphere volume type")
 }
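
Folding the nil-check and the error into getVolumeSource means every caller now follows the same three-value pattern (sketch):

volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
    return err // spec referenced neither an inline VsphereVolume nor a matching PV
}
// use volumeSource.VolumePath and readOnly as needed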

@@ -38,14 +38,14 @@ func TestCanSupport(t *testing.T) {
     }
     defer os.RemoveAll(tmpDir)
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
     plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
     if err != nil {
         t.Errorf("Can't find the plugin by name")
     }
-    if plug.Name() != "kubernetes.io/vsphere-volume" {
-        t.Errorf("Wrong name: %s", plug.Name())
+    if plug.GetPluginName() != "kubernetes.io/vsphere-volume" {
+        t.Errorf("Wrong name: %s", plug.GetPluginName())
     }
 
     if !plug.CanSupport(&volume.Spec{Volume: &api.Volume{VolumeSource: api.VolumeSource{VsphereVolume: &api.VsphereVirtualDiskVolumeSource{}}}}) {
@@ -118,7 +118,7 @@ func TestPlugin(t *testing.T) {
     defer os.RemoveAll(tmpDir)
 
     plugMgr := volume.VolumePluginMgr{}
-    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
+    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil, "" /* rootContext */))
 
     plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
     if err != nil {

@@ -44,13 +44,16 @@ import (
 const (
     gcePDDetachTimeout  = 10 * time.Minute
     gcePDDetachPollTime = 10 * time.Second
+    nodeStatusTimeout   = 1 * time.Minute
+    nodeStatusPollTime  = 1 * time.Second
 )
 
 var _ = framework.KubeDescribe("Pod Disks", func() {
     var (
         podClient  client.PodInterface
+        nodeClient client.NodeInterface
         host0Name  string
         host1Name  string
     )
     f := framework.NewDefaultFramework("pod-disks")
 
@@ -58,6 +61,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
     framework.SkipUnlessNodeCountIsAtLeast(2)
 
     podClient = f.Client.Pods(f.Namespace.Name)
+    nodeClient = f.Client.Nodes()
     nodes := framework.GetReadySchedulableNodesOrDie(f.Client)
 
     Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
@@ -100,6 +104,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
     framework.ExpectNoError(f.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
     framework.Logf("Wrote value: %v", testFileContents)
 
+    // Verify that the disk shows up in the node's VolumesInUse status
+    framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, true /* shouldExist */))
+
     By("deleting host0Pod")
     framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
 
@@ -115,6 +122,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
     Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))
 
+    // Verify that the disk is removed from the node's VolumesInUse status
+    framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
+
     By("deleting host1Pod")
     framework.ExpectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
 
@@ -545,3 +555,52 @@ func detachAndDeletePDs(diskName string, hosts []string) {
     By(fmt.Sprintf("Deleting PD %q", diskName))
     deletePDWithRetry(diskName)
 }
+
+func waitForPDInVolumesInUse(
+    nodeClient client.NodeInterface,
+    diskName, nodeName string,
+    timeout time.Duration,
+    shouldExist bool) error {
+    logStr := "to contain"
+    if !shouldExist {
+        logStr = "to NOT contain"
+    }
+    framework.Logf(
+        "Waiting for node %s's VolumesInUse Status %s PD %q",
+        nodeName, logStr, diskName)
+    for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
+        nodeObj, err := nodeClient.Get(nodeName)
+        if err != nil || nodeObj == nil {
+            framework.Logf(
+                "Failed to fetch node object %q from API server. err=%v",
+                nodeName, err)
+            continue
+        }
+
+        exists := false
+        for _, volumeInUse := range nodeObj.Status.VolumesInUse {
+            volumeInUseStr := string(volumeInUse)
+            if strings.Contains(volumeInUseStr, diskName) {
+                if shouldExist {
+                    framework.Logf(
+                        "Found PD %q in node %q's VolumesInUse Status: %q",
+                        diskName, nodeName, volumeInUseStr)
+                    return nil
+                }
+
+                exists = true
+            }
+        }
+
+        if !shouldExist && !exists {
+            framework.Logf(
+                "Verified PD %q does not exist in node %q's VolumesInUse Status.",
+                diskName, nodeName)
+            return nil
+        }
+    }
+
+    return fmt.Errorf(
+        "Timed out waiting for node %s VolumesInUse Status %s diskName %q",
+        nodeName, logStr, diskName)
+}

@@ -582,7 +582,7 @@ func createClients(t *testing.T, s *httptest.Server) (*clientset.Clientset, *per
     binderClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
     testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
 
-    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
+    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)
     plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{
         PluginName: "plugin-name",
         Host:       host,