Allow clients to request most recent container logs
Many users attempt to use 'kubectl logs' in order to find the logs for a container, but receive no logs or an error telling them their container is not running. The fix in that case is to rerun with '--previous', but this does not match user expectations for the logs command.

This commit changes the Kubelet to return the logs of the currently running container, or of the most recently terminated container if none is running, unless the user provides the '--previous' flag to request the prior instance. If the user specifies '--follow', the logs of the most recent container are streamed, and if that container has terminated the stream simply comes to an end (the user can repeatedly invoke 'kubectl logs --follow' and see the same output).

Also clean up error messages in the Kubelet log path to be consistent and give users a more predictable experience, and have the Kubelet return 400 on invalid requests.
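To make the selection logic concrete, here is a minimal Go sketch of the behavior described above. It is not the Kubelet's actual implementation: the containerStatus type and pickContainerID helper are invented for illustration, standing in for api.ContainerStatus and the validateContainerLogStatus helper exercised by the tests in the diff below.

package main

import (
	"fmt"
	"net/http"
)

// containerStatus is a pared-down, hypothetical stand-in for
// api.ContainerStatus; the real Kubelet code operates on api.PodStatus.
type containerStatus struct {
	Name       string
	Running    bool   // container is currently running
	Terminated bool   // container ran and exited
	ID         string // ID of the current (or most recently exited) container
	LastID     string // ID of the prior terminated instance, if any
}

// pickContainerID mirrors the commit's intent: serve logs from the current
// container whether it is running or terminated, and fall back to the prior
// instance only when the caller explicitly asks for "previous". Errors are
// meant to be mapped to HTTP 400 by the Kubelet's handler.
func pickContainerID(statuses []containerStatus, name string, previous bool) (string, error) {
	for _, s := range statuses {
		if s.Name != name {
			continue
		}
		if previous {
			if s.LastID == "" {
				return "", fmt.Errorf("previous terminated container %q not found", name)
			}
			return s.LastID, nil
		}
		if s.Running || s.Terminated {
			return s.ID, nil
		}
		return "", fmt.Errorf("container %q is waiting to start, no logs yet", name)
	}
	return "", fmt.Errorf("container %q not found", name)
}

func main() {
	statuses := []containerStatus{{Name: "x", Terminated: true, ID: "abc"}}

	// A terminated container still serves its logs without --previous.
	id, err := pickContainerID(statuses, "x", false)
	fmt.Println(id, err)

	// An unknown container name is an invalid request; the handler would
	// answer with http.StatusBadRequest (400).
	if _, err := pickContainerID(statuses, "blah", false); err != nil {
		fmt.Println(http.StatusBadRequest, err)
	}
}

In practice this is why 'kubectl logs' on a crashed pod now prints the final output of the exited container instead of an error, while 'kubectl logs --previous' still reaches the instance before it.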
@@ -2398,33 +2398,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
 	}
 }
 
-func TestValidatePodStatus(t *testing.T) {
-	testKubelet := newTestKubelet(t)
-	kubelet := testKubelet.kubelet
-	testCases := []struct {
-		podPhase api.PodPhase
-		success  bool
-	}{
-		{api.PodRunning, true},
-		{api.PodSucceeded, true},
-		{api.PodFailed, true},
-		{api.PodPending, false},
-		{api.PodUnknown, false},
-	}
-
-	for i, tc := range testCases {
-		err := kubelet.validatePodPhase(&api.PodStatus{Phase: tc.podPhase})
-		if tc.success {
-			if err != nil {
-				t.Errorf("[case %d]: unexpected failure - %v", i, err)
-			}
-		} else if err == nil {
-			t.Errorf("[case %d]: unexpected success", i)
-		}
-	}
-}
-
-func TestValidateContainerStatus(t *testing.T) {
+func TestValidateContainerLogStatus(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
 	containerName := "x"
@@ -2446,6 +2420,17 @@ func TestValidateContainerStatus(t *testing.T) {
 			},
 			success: true,
 		},
+		{
+			statuses: []api.ContainerStatus{
+				{
+					Name: containerName,
+					State: api.ContainerState{
+						Running: &api.ContainerStateRunning{},
+					},
+				},
+			},
+			success: true,
+		},
 		{
 			statuses: []api.ContainerStatus{
 				{
@@ -2468,10 +2453,28 @@ func TestValidateContainerStatus(t *testing.T) {
 			},
 			success: false,
 		},
+		{
+			statuses: []api.ContainerStatus{
+				{
+					Name: containerName,
+					State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePull"}},
+				},
+			},
+			success: false,
+		},
+		{
+			statuses: []api.ContainerStatus{
+				{
+					Name: containerName,
+					State: api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ErrImagePullBackOff"}},
+				},
+			},
+			success: false,
+		},
 	}
 
 	for i, tc := range testCases {
-		_, err := kubelet.validateContainerStatus(&api.PodStatus{
+		_, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
			ContainerStatuses: tc.statuses,
 		}, containerName, false)
 		if tc.success {
@@ -2482,21 +2485,31 @@ func TestValidateContainerStatus(t *testing.T) {
 			t.Errorf("[case %d]: unexpected success", i)
 		}
 	}
-	if _, err := kubelet.validateContainerStatus(&api.PodStatus{
+	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
 		ContainerStatuses: testCases[0].statuses,
 	}, "blah", false); err == nil {
 		t.Errorf("expected error with invalid container name")
 	}
-	if _, err := kubelet.validateContainerStatus(&api.PodStatus{
+	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
 		ContainerStatuses: testCases[0].statuses,
 	}, containerName, true); err != nil {
 		t.Errorf("unexpected error with for previous terminated container - %v", err)
 	}
-	if _, err := kubelet.validateContainerStatus(&api.PodStatus{
+	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
 		ContainerStatuses: testCases[0].statuses,
 	}, containerName, false); err != nil {
 		t.Errorf("unexpected error with for most recent container - %v", err)
 	}
+	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+		ContainerStatuses: testCases[1].statuses,
+	}, containerName, true); err == nil {
+		t.Errorf("expected error with for previous terminated container")
+	}
+	if _, err := kubelet.validateContainerLogStatus("podName", &api.PodStatus{
+		ContainerStatuses: testCases[1].statuses,
+	}, containerName, false); err != nil {
+		t.Errorf("unexpected error with for most recent container")
+	}
 }
 
 // updateDiskSpacePolicy creates a new DiskSpaceManager with a new policy. This new manager along
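One detail visible in the diff: every call site now passes a pod name as the first argument to validateContainerLogStatus. A plausible reading, given the commit's note about consistent error messages, is that failures can now name both the pod and the container. The small sketch below shows that style of message; the wording is assumed for illustration, not taken from the Kubelet source.

package main

import "fmt"

// logStatusError shows the style of failure message the commit standardizes
// on: name both the pod and the container so users can tell exactly which
// log request was rejected. The exact wording here is hypothetical.
func logStatusError(podName, containerName, reason string) error {
	return fmt.Errorf("container %q in pod %q is waiting to start: %s", containerName, podName, reason)
}

func main() {
	fmt.Println(logStatusError("podName", "x", "ErrImagePull"))
}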