mirror of https://github.com/optim-enterprises-bv/kubernetes.git (synced 2025-11-02 03:08:15 +00:00)

Merge remote-tracking branch 'upstream/master'
72 Godeps/Godeps.json (generated)
@@ -261,93 +261,93 @@
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/api",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/cache/memory",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/collector",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/container",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/events",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/fs",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/healthz",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/http",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/info/v1",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/info/v2",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/manager",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/metrics",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/pages",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/storage",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/summary",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/utils",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/validate",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/cadvisor/version",
-			"Comment": "0.16.0-81-g27fb6d5",
-			"Rev": "27fb6d593c6bffe274718119659815771e79e198"
+			"Comment": "0.16.0.2",
+			"Rev": "cefada41b87c35294533638733c563a349b95f05"
 		},
 		{
 			"ImportPath": "github.com/google/gofuzz",
65 Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go (generated, vendored)
@@ -19,6 +19,7 @@ import (
 	"net/http"
 	"path"
 	"strconv"
+	"time"

 	"github.com/golang/glog"
 	info "github.com/google/cadvisor/info/v1"
@@ -449,8 +450,63 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
 	}
 }

+func instCpuStats(last, cur *info.ContainerStats) (*v2.CpuInstStats, error) {
+	if last == nil {
+		return nil, nil
+	}
+	if !cur.Timestamp.After(last.Timestamp) {
+		return nil, fmt.Errorf("container stats move backwards in time")
+	}
+	if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) {
+		return nil, fmt.Errorf("different number of cpus")
+	}
+	timeDelta := cur.Timestamp.Sub(last.Timestamp)
+	if timeDelta <= 100*time.Millisecond {
+		return nil, fmt.Errorf("time delta unexpectedly small")
+	}
+	// Nanoseconds to gain precision and avoid having zero seconds if the
+	// difference between the timestamps is just under a second
+	timeDeltaNs := uint64(timeDelta.Nanoseconds())
+	convertToRate := func(lastValue, curValue uint64) (uint64, error) {
+		if curValue < lastValue {
+			return 0, fmt.Errorf("cumulative stats decrease")
+		}
+		valueDelta := curValue - lastValue
+		return (valueDelta * 1e9) / timeDeltaNs, nil
+	}
+	total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
+	if err != nil {
+		return nil, err
+	}
+	percpu := make([]uint64, len(last.Cpu.Usage.PerCpu))
+	for i := range percpu {
+		var err error
+		percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User)
+	if err != nil {
+		return nil, err
+	}
+	system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System)
+	if err != nil {
+		return nil, err
+	}
+	return &v2.CpuInstStats{
+		Usage: v2.CpuInstUsage{
+			Total:  total,
+			PerCpu: percpu,
+			User:   user,
+			System: system,
+		},
+	}, nil
+}
+
 func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
-	stats := []v2.ContainerStats{}
+	stats := make([]v2.ContainerStats, 0, len(cont.Stats))
+	var last *info.ContainerStats
 	for _, val := range cont.Stats {
 		stat := v2.ContainerStats{
 			Timestamp: val.Timestamp,
@@ -463,6 +519,13 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
 		}
 		if stat.HasCpu {
 			stat.Cpu = val.Cpu
+			cpuInst, err := instCpuStats(last, val)
+			if err != nil {
+				glog.Warningf("Could not get instant cpu stats: %v", err)
+			} else {
+				stat.CpuInst = cpuInst
+			}
+			last = val
 		}
 		if stat.HasMemory {
 			stat.Memory = val.Memory
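The new instCpuStats helper turns two cumulative CPU samples into an instantaneous rate in nanocores per second: it takes the delta of the cumulative nanosecond counters, multiplies by 1e9, and divides by the elapsed time in nanoseconds, rejecting sample pairs that go backwards in time, decrease, or are under 100ms apart. A minimal standalone sketch of the same rate arithmetic (names here are illustrative, not part of the vendored code):

```go
package main

import (
	"fmt"
	"time"
)

// cumulativeToRate converts two cumulative counters (nanoseconds of CPU time)
// sampled timeDelta apart into a rate in nanocores per second, applying the
// same arithmetic and sanity checks as instCpuStats: delta * 1e9 / timeDeltaNs.
func cumulativeToRate(last, cur uint64, timeDelta time.Duration) (uint64, error) {
	if cur < last {
		return 0, fmt.Errorf("cumulative stats decrease")
	}
	if timeDelta <= 100*time.Millisecond {
		return 0, fmt.Errorf("time delta unexpectedly small")
	}
	return (cur - last) * 1e9 / uint64(timeDelta.Nanoseconds()), nil
}

func main() {
	// 200ns of CPU consumed over 2s -> 100 nanocores per second, matching
	// the "Two seconds elapsed" case in versions_test.go below.
	rate, err := cumulativeToRate(300, 500, 2*time.Second)
	fmt.Println(rate, err) // 100 <nil>
}
```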
169 Godeps/_workspace/src/github.com/google/cadvisor/api/versions_test.go (generated, vendored)
@@ -19,9 +19,11 @@ import (
 	"net/http"
 	"reflect"
 	"testing"
+	"time"

 	"github.com/google/cadvisor/events"
 	info "github.com/google/cadvisor/info/v1"
+	"github.com/google/cadvisor/info/v2"
 	"github.com/stretchr/testify/assert"
 )

@@ -78,3 +80,170 @@ func TestGetEventRequestDoubleArgument(t *testing.T) {
 	assert.True(t, stream)
 	assert.Nil(t, err)
 }
+
+func TestInstCpuStats(t *testing.T) {
+	tests := []struct {
+		last *info.ContainerStats
+		cur  *info.ContainerStats
+		want *v2.CpuInstStats
+	}{
+		// Last is missing
+		{
+			nil,
+			&info.ContainerStats{},
+			nil,
+		},
+		// Goes back in time
+		{
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0).Add(time.Second),
+			},
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+			},
+			nil,
+		},
+		// Zero time delta
+		{
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+			},
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+			},
+			nil,
+		},
+		// Unexpectedly small time delta
+		{
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+			},
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0).Add(30 * time.Millisecond),
+			},
+			nil,
+		},
+		// Different number of cpus
+		{
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						PerCpu: []uint64{100, 200},
+					},
+				},
+			},
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0).Add(time.Second),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						PerCpu: []uint64{100, 200, 300},
+					},
+				},
+			},
+			nil,
+		},
+		// Stat numbers decrease
+		{
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						Total:  300,
+						PerCpu: []uint64{100, 200},
+						User:   250,
+						System: 50,
+					},
+				},
+			},
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0).Add(time.Second),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						Total:  200,
+						PerCpu: []uint64{100, 100},
+						User:   150,
+						System: 50,
+					},
+				},
+			},
+			nil,
+		},
+		// One second elapsed
+		{
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						Total:  300,
+						PerCpu: []uint64{100, 200},
+						User:   250,
+						System: 50,
+					},
+				},
+			},
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0).Add(time.Second),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						Total:  500,
+						PerCpu: []uint64{200, 300},
+						User:   400,
+						System: 100,
+					},
+				},
+			},
+			&v2.CpuInstStats{
+				Usage: v2.CpuInstUsage{
+					Total:  200,
+					PerCpu: []uint64{100, 100},
+					User:   150,
+					System: 50,
+				},
+			},
+		},
+		// Two seconds elapsed
+		{
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						Total:  300,
+						PerCpu: []uint64{100, 200},
+						User:   250,
+						System: 50,
+					},
+				},
+			},
+			&info.ContainerStats{
+				Timestamp: time.Unix(100, 0).Add(2 * time.Second),
+				Cpu: info.CpuStats{
+					Usage: info.CpuUsage{
+						Total:  500,
+						PerCpu: []uint64{200, 300},
+						User:   400,
+						System: 100,
+					},
+				},
+			},
+			&v2.CpuInstStats{
+				Usage: v2.CpuInstUsage{
+					Total:  100,
+					PerCpu: []uint64{50, 50},
+					User:   75,
+					System: 25,
+				},
+			},
+		},
+	}
+	for _, c := range tests {
+		got, err := instCpuStats(c.last, c.cur)
+		if err != nil {
+			if c.want == nil {
+				continue
+			}
+			t.Errorf("Unexpected error: %v", err)
+		}
+		assert.Equal(t, c.want, got)
+	}
+}
3 Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go (generated, vendored)
@@ -96,7 +96,7 @@ func (self *dockerFactory) String() string {
 	return DockerNamespace
 }

-func (self *dockerFactory) NewContainerHandler(name string) (handler container.ContainerHandler, err error) {
+func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) {
 	client, err := docker.NewClient(*ArgDockerEndpoint)
 	if err != nil {
 		return
@@ -108,6 +108,7 @@ func (self *dockerFactory) NewContainerHandler(name string) (handler container.C
 		self.fsInfo,
 		self.usesAufsDriver,
 		&self.cgroupSubsystems,
+		inHostNamespace,
 	)
 	return
 }
91 Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go (generated, vendored)
@@ -71,6 +71,18 @@ type dockerContainerHandler struct {

 	// Metadata labels associated with the container.
 	labels map[string]string

+	// The container PID used to switch namespaces as required
+	pid int
+
+	// Image name used for this container.
+	image string
+
+	// The host root FS to read
+	rootFs string
+
+	// The network mode of the container
+	networkMode string
 }

 func newDockerContainerHandler(
@@ -80,6 +92,7 @@ func newDockerContainerHandler(
 	fsInfo fs.FsInfo,
 	usesAufsDriver bool,
 	cgroupSubsystems *containerLibcontainer.CgroupSubsystems,
+	inHostNamespace bool,
 ) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
 	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
@@ -95,6 +108,11 @@ func newDockerContainerHandler(
 		Paths: cgroupPaths,
 	}

+	rootFs := "/"
+	if !inHostNamespace {
+		rootFs = "/rootfs"
+	}
+
 	id := ContainerNameToDockerId(name)
 	handler := &dockerContainerHandler{
 		id: id,
@@ -105,6 +123,7 @@ func newDockerContainerHandler(
 		cgroupManager:  cgroupManager,
 		usesAufsDriver: usesAufsDriver,
 		fsInfo:         fsInfo,
+		rootFs:         rootFs,
 	}
 	handler.storageDirs = append(handler.storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))

@@ -114,11 +133,14 @@ func newDockerContainerHandler(
 		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
 	}
 	handler.creationTime = ctnr.Created
+	handler.pid = ctnr.State.Pid

 	// Add the name and bare ID as aliases of the container.
 	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"))
 	handler.aliases = append(handler.aliases, id)
 	handler.labels = ctnr.Config.Labels
+	handler.image = ctnr.Config.Image
+	handler.networkMode = ctnr.HostConfig.NetworkMode

 	return handler, nil
 }
@@ -167,21 +189,23 @@ func libcontainerConfigToContainerSpec(config *libcontainerConfigs.Config, mi *i
 	}
 	spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores)

-	// Docker reports a loop device for containers with --net=host. Ignore
-	// those too.
-	networkCount := 0
-	for _, n := range config.Networks {
-		if n.Type != "loopback" {
-			networkCount += 1
-		}
-	}
-
-	spec.HasNetwork = networkCount > 0
 	spec.HasDiskIo = true

 	return spec
 }

+var (
+	hasNetworkModes = map[string]bool{
+		"host":    true,
+		"bridge":  true,
+		"default": true,
+	}
+)
+
+func hasNet(networkMode string) bool {
+	return hasNetworkModes[networkMode]
+}
+
 func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 	mi, err := self.machineInfoFactory.GetMachineInfo()
 	if err != nil {
@@ -198,6 +222,8 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
 		spec.HasFilesystem = true
 	}
 	spec.Labels = self.labels
+	spec.Image = self.image
+	spec.HasNetwork = hasNet(self.networkMode)

 	return spec, err
 }
@@ -247,32 +273,16 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error

 // TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.
 func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
-	config, err := self.readLibcontainerConfig()
-	if err != nil {
-		return nil, err
-	}
-
-	var networkInterfaces []string
-	if len(config.Networks) > 0 {
-		// ContainerStats only reports stat for one network device.
-		// TODO(vmarmol): Handle multiple physical network devices.
-		for _, n := range config.Networks {
-			// Take the first non-loopback.
-			if n.Type != "loopback" {
-				networkInterfaces = []string{n.HostInterfaceName}
-				break
-			}
-		}
-	}
-	stats, err := containerLibcontainer.GetStats(self.cgroupManager, networkInterfaces)
+	stats, err := containerLibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid)
 	if err != nil {
 		return stats, err
 	}

-	// TODO(rjnagal): Remove the conversion when network stats are read from libcontainer.
-	convertInterfaceStats(&stats.Network.InterfaceStats)
-	for i := range stats.Network.Interfaces {
-		convertInterfaceStats(&stats.Network.Interfaces[i])
+	// Clean up stats for containers that don't have their own network - this
+	// includes containers running in Kubernetes pods that use the network of the
+	// infrastructure container. This stops metrics being reported multiple times
+	// for each container in a pod.
+	if !hasNet(self.networkMode) {
+		stats.Network = info.NetworkStats{}
 	}

 	// Get filesystem stats.
@@ -284,21 +294,6 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
 	return stats, nil
 }

-func convertInterfaceStats(stats *info.InterfaceStats) {
-	net := *stats
-
-	// Ingress for host veth is from the container.
-	// Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
-	stats.RxBytes = net.TxBytes
-	stats.RxPackets = net.TxPackets
-	stats.RxErrors = net.TxErrors
-	stats.RxDropped = net.TxDropped
-	stats.TxBytes = net.RxBytes
-	stats.TxPackets = net.RxPackets
-	stats.TxErrors = net.RxErrors
-	stats.TxDropped = net.RxDropped
-}
-
 func (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {
 	if self.name != "/docker" {
 		return []info.ContainerReference{}, nil
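The handler now records the Docker network mode and zeroes network stats for containers that share another container's network namespace (for example, a Kubernetes pod container using the infrastructure container's network), so per-pod traffic is reported only once. A small sketch of that lookup, abridged from the vendored code:

```go
package main

import "fmt"

// Mirrors the vendored hasNet check: only these Docker network modes mean
// the container owns its own network stats.
var hasNetworkModes = map[string]bool{
	"host":    true,
	"bridge":  true,
	"default": true,
}

func hasNet(networkMode string) bool {
	return hasNetworkModes[networkMode]
}

func main() {
	// A pod container sharing the infra container's namespace reports no
	// network stats of its own; missing map keys simply return false.
	fmt.Println(hasNet("bridge"))           // true
	fmt.Println(hasNet("container:abc123")) // false -> stats zeroed
}
```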
6 Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go (generated, vendored)
@@ -23,7 +23,7 @@ import (

 type ContainerHandlerFactory interface {
 	// Create a new ContainerHandler using this factory. CanHandleAndAccept() must have returned true.
-	NewContainerHandler(name string) (c ContainerHandler, err error)
+	NewContainerHandler(name string, inHostNamespace bool) (c ContainerHandler, err error)

 	// Returns whether this factory can handle and accept the specified container.
 	CanHandleAndAccept(name string) (handle bool, accept bool, err error)
@@ -60,7 +60,7 @@ func HasFactories() bool {
 }

 // Create a new ContainerHandler for the specified container.
-func NewContainerHandler(name string) (ContainerHandler, bool, error) {
+func NewContainerHandler(name string, inHostNamespace bool) (ContainerHandler, bool, error) {
 	factoriesLock.RLock()
 	defer factoriesLock.RUnlock()

@@ -76,7 +76,7 @@ func NewContainerHandler(name string) (ContainerHandler, bool, error) {
 			return nil, false, nil
 		}
 		glog.V(3).Infof("Using factory %q for container %q", factory, name)
-		handle, err := factory.NewContainerHandler(name)
+		handle, err := factory.NewContainerHandler(name, inHostNamespace)
 		return handle, canAccept, err
 	} else {
 		glog.V(4).Infof("Factory %q was unable to handle container %q", factory, name)
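The inHostNamespace flag threads from the manager (which knows whether cAdvisor runs in the host namespace or in a container with the host filesystem mounted at /rootfs) through this factory interface down to each handler, which uses it only to pick the root FS prefix. A minimal sketch of the pattern (the interface is abridged here; the real one returns a ContainerHandler):

```go
package main

import "fmt"

// Abridged from the vendored interface: factories now receive the namespace
// flag so handlers can resolve host paths like /proc/<pid>/net/dev correctly.
type ContainerHandlerFactory interface {
	NewContainerHandler(name string, inHostNamespace bool) (string, error)
}

type rawFactory struct{}

func (rawFactory) NewContainerHandler(name string, inHostNamespace bool) (string, error) {
	// When cAdvisor itself runs in a container, the host's / is mounted at
	// /rootfs, so host files must be read under that prefix.
	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
	}
	return fmt.Sprintf("handler(%s, rootFs=%s)", name, rootFs), nil
}

func main() {
	var f ContainerHandlerFactory = rawFactory{}
	h, _ := f.NewContainerHandler("/docker/abc", false)
	fmt.Println(h) // handler(/docker/abc, rootFs=/rootfs)
}
```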
14 Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go (generated, vendored)
@@ -39,7 +39,7 @@ func (self *mockContainerHandlerFactory) CanHandleAndAccept(name string) (bool,
 	return self.CanHandleValue, self.CanAcceptValue, nil
 }

-func (self *mockContainerHandlerFactory) NewContainerHandler(name string) (ContainerHandler, error) {
+func (self *mockContainerHandlerFactory) NewContainerHandler(name string, isHostNamespace bool) (ContainerHandler, error) {
 	args := self.Called(name)
 	return args.Get(0).(ContainerHandler), args.Error(1)
 }
@@ -60,13 +60,13 @@ func TestNewContainerHandler_FirstMatches(t *testing.T) {
 	RegisterContainerHandlerFactory(allwaysYes)

 	// The yes factory should be asked to create the ContainerHandler.
-	mockContainer, err := mockFactory.NewContainerHandler(testContainerName)
+	mockContainer, err := mockFactory.NewContainerHandler(testContainerName, true)
 	if err != nil {
 		t.Error(err)
 	}
 	allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil)

-	cont, _, err := NewContainerHandler(testContainerName)
+	cont, _, err := NewContainerHandler(testContainerName, true)
 	if err != nil {
 		t.Error(err)
 	}
@@ -93,13 +93,13 @@ func TestNewContainerHandler_SecondMatches(t *testing.T) {
 	RegisterContainerHandlerFactory(allwaysYes)

 	// The yes factory should be asked to create the ContainerHandler.
-	mockContainer, err := mockFactory.NewContainerHandler(testContainerName)
+	mockContainer, err := mockFactory.NewContainerHandler(testContainerName, true)
 	if err != nil {
 		t.Error(err)
 	}
 	allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil)

-	cont, _, err := NewContainerHandler(testContainerName)
+	cont, _, err := NewContainerHandler(testContainerName, true)
 	if err != nil {
 		t.Error(err)
 	}
@@ -125,7 +125,7 @@ func TestNewContainerHandler_NoneMatch(t *testing.T) {
 	}
 	RegisterContainerHandlerFactory(allwaysNo2)

-	_, _, err := NewContainerHandler(testContainerName)
+	_, _, err := NewContainerHandler(testContainerName, true)
 	if err == nil {
 		t.Error("Expected NewContainerHandler to fail")
 	}
@@ -148,7 +148,7 @@ func TestNewContainerHandler_Accept(t *testing.T) {
 	}
 	RegisterContainerHandlerFactory(cannotAccept)

-	_, accept, err := NewContainerHandler(testContainerName)
+	_, accept, err := NewContainerHandler(testContainerName, true)
 	if err != nil {
 		t.Error("Expected NewContainerHandler to succeed")
 	}
90 Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go (generated, vendored)
@@ -15,14 +15,19 @@
 package libcontainer

 import (
+	"bufio"
 	"fmt"
+	"io/ioutil"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
 	"time"

 	"github.com/docker/libcontainer"
 	"github.com/docker/libcontainer/cgroups"
 	"github.com/golang/glog"
 	info "github.com/google/cadvisor/info/v1"
-	"github.com/google/cadvisor/utils/sysinfo"
 )

 type CgroupSubsystems struct {
@@ -74,7 +79,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{
 }

 // Get cgroup and networking stats of the specified container
-func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.ContainerStats, error) {
+func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int) (*info.ContainerStats, error) {
 	cgroupStats, err := cgroupManager.GetStats()
 	if err != nil {
 		return nil, err
@@ -84,23 +89,90 @@ func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.
 	}
 	stats := toContainerStats(libcontainerStats)

-	// TODO(rjnagal): Use networking stats directly from libcontainer.
-	stats.Network.Interfaces = make([]info.InterfaceStats, len(networkInterfaces))
-	for i := range networkInterfaces {
-		interfaceStats, err := sysinfo.GetNetworkStats(networkInterfaces[i])
-		if err != nil {
-			return stats, err
-		}
-		stats.Network.Interfaces[i] = interfaceStats
-	}
+	// If we know the pid then get network stats from /proc/<pid>/net/dev
+	if pid > 0 {
+		netStats, err := networkStatsFromProc(rootFs, pid)
+		if err != nil {
+			glog.V(2).Infof("Unable to get network stats from pid %d: %v", pid, err)
+		} else {
+			stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...)
+		}
+	}

 	// For backwards compatibility.
-	if len(networkInterfaces) > 0 {
+	if len(stats.Network.Interfaces) > 0 {
 		stats.Network.InterfaceStats = stats.Network.Interfaces[0]
 	}

 	return stats, nil
 }

+func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) {
+	netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev")
+
+	ifaceStats, err := scanInterfaceStats(netStatsFile)
+	if err != nil {
+		return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err)
+	}
+
+	return ifaceStats, nil
+}
+
+var (
+	ignoredDevicePrefixes = []string{"lo", "veth", "docker"}
+	netStatLineRE         = regexp.MustCompile("[ ]*(.+):([ ]+[0-9]+){16}")
+)
+
+func isIgnoredDevice(ifName string) bool {
+	for _, prefix := range ignoredDevicePrefixes {
+		if strings.HasPrefix(strings.ToLower(ifName), prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) {
+	var (
+		bkt uint64
+	)
+
+	stats := []info.InterfaceStats{}
+
+	data, err := ioutil.ReadFile(netStatsFile)
+	if err != nil {
+		return stats, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
+	}
+
+	reader := strings.NewReader(string(data))
+	scanner := bufio.NewScanner(reader)
+
+	scanner.Split(bufio.ScanLines)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if netStatLineRE.MatchString(line) {
+			line = strings.Replace(line, ":", "", -1)
+
+			i := info.InterfaceStats{}
+
+			_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d",
+				&i.Name, &i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped, &bkt, &bkt, &bkt,
+				&bkt, &i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped, &bkt, &bkt, &bkt, &bkt)
+
+			if err != nil {
+				return stats, fmt.Errorf("failure opening %s: %v", netStatsFile, err)
+			}
+
+			if !isIgnoredDevice(i.Name) {
+				stats = append(stats, i)
+			}
+		}
+	}
+
+	return stats, nil
+}
+
 func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) {
 	pids, err := cgroupManager.GetPids()
 	if err != nil {
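scanInterfaceStats parses one /proc/<pid>/net/dev line per interface: after "iface:" come 16 numeric columns, of which only the rx/tx byte, packet, error, and drop counters are kept, the rest being discarded into a bucket variable. A standalone demo of the same regex-plus-Sscanf approach on a sample line (the line values here are made up):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same pattern as the vendored netStatLineRE: an interface name, a colon,
// then exactly 16 whitespace-separated counters.
var netStatLineRE = regexp.MustCompile("[ ]*(.+):([ ]+[0-9]+){16}")

func main() {
	line := "  eth0: 14 15 16 17 0 0 0 0 18 19 20 21 0 0 0 0"
	if !netStatLineRE.MatchString(line) {
		panic("line did not match")
	}
	// Strip the colon so Sscanf sees 17 plain fields.
	line = strings.Replace(line, ":", "", -1)

	var (
		name                                    string
		rxBytes, rxPackets, rxErrors, rxDropped uint64
		txBytes, txPackets, txErrors, txDropped uint64
		bkt                                     uint64 // bucket for ignored columns
	)
	_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d",
		&name, &rxBytes, &rxPackets, &rxErrors, &rxDropped, &bkt, &bkt, &bkt,
		&bkt, &txBytes, &txPackets, &txErrors, &txDropped, &bkt, &bkt, &bkt, &bkt)
	if err != nil {
		panic(err)
	}
	fmt.Println(name, rxBytes, txBytes) // eth0 14 18
}
```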
2 Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go (generated, vendored)
@@ -109,7 +109,7 @@ func (self *FactoryForMockContainerHandler) String() string {
 	return self.Name
 }

-func (self *FactoryForMockContainerHandler) NewContainerHandler(name string) (ContainerHandler, error) {
+func (self *FactoryForMockContainerHandler) NewContainerHandler(name string, inHostNamespace bool) (ContainerHandler, error) {
 	handler := &MockContainerHandler{}
 	if self.PrepareContainerHandlerFunc != nil {
 		self.PrepareContainerHandlerFunc(name, handler)
8 Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go (generated, vendored)
@@ -45,8 +45,12 @@ func (self *rawFactory) String() string {
 	return "raw"
 }

-func (self *rawFactory) NewContainerHandler(name string) (container.ContainerHandler, error) {
-	return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher)
+func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
+	rootFs := "/"
+	if !inHostNamespace {
+		rootFs = "/rootfs"
+	}
+	return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs)
 }

 // The raw factory can handle any container. If --docker_only is set to false, non-docker containers are ignored.
15 Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go (generated, vendored)
@@ -61,9 +61,11 @@ type rawContainerHandler struct {

 	fsInfo         fs.FsInfo
 	externalMounts []mount
+
+	rootFs string
 }

-func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher) (container.ContainerHandler, error) {
+func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher, rootFs string) (container.ContainerHandler, error) {
 	// Create the cgroup paths.
 	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
 	for key, val := range cgroupSubsystems.MountPoints {
@@ -108,6 +110,7 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu
 		hasNetwork:     hasNetwork,
 		externalMounts: externalMounts,
 		watcher:        watcher,
+		rootFs:         rootFs,
 	}, nil
 }

@@ -326,15 +329,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error {
 }

 func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) {
-	nd, err := self.GetRootNetworkDevices()
-	if err != nil {
-		return new(info.ContainerStats), err
-	}
-	networkInterfaces := make([]string, len(nd))
-	for i := range nd {
-		networkInterfaces[i] = nd[i].Name
-	}
-	stats, err := libcontainer.GetStats(self.cgroupManager, networkInterfaces)
+	stats, err := libcontainer.GetStats(self.cgroupManager, self.rootFs, os.Getpid())
 	if err != nil {
 		return stats, err
 	}
3 Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go (generated, vendored)
@@ -61,6 +61,9 @@ type ContainerSpec struct {

 	HasCustomMetrics bool         `json:"has_custom_metrics"`
 	CustomMetrics    []MetricSpec `json:"custom_metrics,omitempty"`
+
+	// Image name used for this container.
+	Image string `json:"image,omitempty"`
 }

 // Container reference contains enough information to uniquely identify a container
34 Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go (generated, vendored)
@@ -80,14 +80,20 @@ type ContainerSpec struct {
 	HasNetwork    bool `json:"has_network"`
 	HasFilesystem bool `json:"has_filesystem"`
 	HasDiskIo     bool `json:"has_diskio"`
+
+	// Image name used for this container.
+	Image string `json:"image,omitempty"`
 }

 type ContainerStats struct {
 	// The time of this stat point.
 	Timestamp time.Time `json:"timestamp"`
 	// CPU statistics
-	HasCpu bool        `json:"has_cpu"`
-	Cpu    v1.CpuStats `json:"cpu,omitempty"`
+	HasCpu bool `json:"has_cpu"`
+	// In nanoseconds (aggregated)
+	Cpu v1.CpuStats `json:"cpu,omitempty"`
+	// In nanocores per second (instantaneous)
+	CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
 	// Disk IO statistics
 	HasDiskIo bool           `json:"has_diskio"`
 	DiskIo    v1.DiskIoStats `json:"diskio,omitempty"`
@@ -204,3 +210,27 @@ type NetworkStats struct {
 	// Network stats by interface.
 	Interfaces []v1.InterfaceStats `json:"interfaces,omitempty"`
 }
+
+// Instantaneous CPU stats
+type CpuInstStats struct {
+	Usage CpuInstUsage `json:"usage"`
+}
+
+// CPU usage time statistics.
+type CpuInstUsage struct {
+	// Total CPU usage.
+	// Units: nanocores per second
+	Total uint64 `json:"total"`
+
+	// Per CPU/core usage of the container.
+	// Unit: nanocores per second
+	PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
+
+	// Time spent in user space.
+	// Unit: nanocores per second
+	User uint64 `json:"user"`
+
+	// Time spent in kernel space.
+	// Unit: nanocores per second
+	System uint64 `json:"system"`
+}
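These new v2 types give the instantaneous CPU usage its own JSON shape alongside the cumulative counters. A sketch of the wire format using local copies of the struct tags above (the sample values are from the "One second elapsed" test case):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Local copies of the new v2 types, kept only to show what the v2.0 API
// now serializes for the cpu_inst field.
type CpuInstUsage struct {
	Total  uint64   `json:"total"`
	PerCpu []uint64 `json:"per_cpu_usage,omitempty"`
	User   uint64   `json:"user"`
	System uint64   `json:"system"`
}

type CpuInstStats struct {
	Usage CpuInstUsage `json:"usage"`
}

func main() {
	s := CpuInstStats{Usage: CpuInstUsage{Total: 200, PerCpu: []uint64{100, 100}, User: 150, System: 50}}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(s); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```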
3 Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go (generated, vendored)
@@ -380,6 +380,7 @@ func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
 		HasNetwork:       specV1.HasNetwork,
 		HasDiskIo:        specV1.HasDiskIo,
 		HasCustomMetrics: specV1.HasCustomMetrics,
+		Image:            specV1.Image,
 	}
 	if specV1.HasCpu {
 		specV2.Cpu.Limit = specV1.Cpu.Limit
@@ -736,7 +737,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c

 // Create a container.
 func (m *manager) createContainer(containerName string) error {
-	handler, accept, err := container.NewContainerHandler(containerName)
+	handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace)
 	if err != nil {
 		return err
 	}
133 Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go (generated, vendored)
@@ -61,7 +61,7 @@ type containerMetric struct {
 }

 func (cm *containerMetric) desc() *prometheus.Desc {
-	return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id"}, cm.extraLabels...), nil)
+	return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id", "image"}, cm.extraLabels...), nil)
 }

 // PrometheusCollector implements prometheus.Collector.
@@ -287,60 +287,124 @@ func NewPrometheusCollector(infoProvider subcontainersInfoProvider) *PrometheusC
 			})
 		},
 	}, {
-		name:      "container_network_receive_bytes_total",
-		help:      "Cumulative count of bytes received",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_receive_bytes_total",
+		help:        "Cumulative count of bytes received",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.RxBytes)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.RxBytes),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
-		name:      "container_network_receive_packets_total",
-		help:      "Cumulative count of packets received",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_receive_packets_total",
+		help:        "Cumulative count of packets received",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.RxPackets)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.RxPackets),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
-		name:      "container_network_receive_packets_dropped_total",
-		help:      "Cumulative count of packets dropped while receiving",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_receive_packets_dropped_total",
+		help:        "Cumulative count of packets dropped while receiving",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.RxDropped)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.RxDropped),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
-		name:      "container_network_receive_errors_total",
-		help:      "Cumulative count of errors encountered while receiving",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_receive_errors_total",
+		help:        "Cumulative count of errors encountered while receiving",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.RxErrors)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.RxErrors),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
-		name:      "container_network_transmit_bytes_total",
-		help:      "Cumulative count of bytes transmitted",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_transmit_bytes_total",
+		help:        "Cumulative count of bytes transmitted",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.TxBytes)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.TxBytes),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
-		name:      "container_network_transmit_packets_total",
-		help:      "Cumulative count of packets transmitted",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_transmit_packets_total",
+		help:        "Cumulative count of packets transmitted",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.TxPackets)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.TxPackets),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
-		name:      "container_network_transmit_packets_dropped_total",
-		help:      "Cumulative count of packets dropped while transmitting",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_transmit_packets_dropped_total",
+		help:        "Cumulative count of packets dropped while transmitting",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.TxDropped)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.TxDropped),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
-		name:      "container_network_transmit_errors_total",
-		help:      "Cumulative count of errors encountered while transmitting",
-		valueType: prometheus.CounterValue,
+		name:        "container_network_transmit_errors_total",
+		help:        "Cumulative count of errors encountered while transmitting",
+		valueType:   prometheus.CounterValue,
+		extraLabels: []string{"interface"},
 		getValues: func(s *info.ContainerStats) metricValues {
-			return metricValues{{value: float64(s.Network.TxErrors)}}
+			values := make(metricValues, 0, len(s.Network.Interfaces))
+			for _, value := range s.Network.Interfaces {
+				values = append(values, metricValue{
+					value:  float64(value.TxErrors),
+					labels: []string{value.Name},
+				})
+			}
+			return values
 		},
 	}, {
 		name: "container_tasks_state",
@@ -401,12 +465,13 @@ func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) {
 		if len(container.Aliases) > 0 {
 			name = container.Aliases[0]
 		}
+		image := container.Spec.Image
 		stats := container.Stats[0]

 		for _, cm := range c.containerMetrics {
 			desc := cm.desc()
 			for _, metricValue := range cm.getValues(stats) {
-				ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append([]string{name, id}, metricValue.labels...)...)
+				ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append([]string{name, id, image}, metricValue.labels...)...)
 			}
 		}
 	}
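Each network metric descriptor now carries an "interface" variable label (alongside name, id, and the new image label), and the collector emits one sample per interface instead of a single aggregate. A hedged sketch of the same pattern with the upstream Prometheus client (the container values here are hypothetical):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Variable labels mirror the new descriptor: name, id, image, interface.
	desc := prometheus.NewDesc(
		"container_network_receive_bytes_total",
		"Cumulative count of bytes received",
		[]string{"name", "id", "image", "interface"}, nil,
	)
	// Hypothetical per-interface counters for one container.
	rxBytes := map[string]float64{"eth0": 14, "eth1": 42}
	for iface, v := range rxBytes {
		// One constant metric per interface, as the Collect loop now does.
		m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, v,
			"testcontainer", "testcontainer", "test", iface)
		fmt.Println(m.Desc())
	}
}
```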
16 Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus_test.go (generated, vendored)
@@ -34,6 +34,9 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
 			ContainerReference: info.ContainerReference{
 				Name: "testcontainer",
 			},
+			Spec: info.ContainerSpec{
+				Image: "test",
+			},
 			Stats: []*info.ContainerStats{
 				{
 					Cpu: info.CpuStats{
@@ -68,6 +71,19 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container
 						TxErrors:  20,
 						TxDropped: 21,
 					},
+					Interfaces: []info.InterfaceStats{
+						{
+							Name:      "eth0",
+							RxBytes:   14,
+							RxPackets: 15,
+							RxErrors:  16,
+							RxDropped: 17,
+							TxBytes:   18,
+							TxPackets: 19,
+							TxErrors:  20,
+							TxDropped: 21,
+						},
+					},
 				},
 				Filesystem: []info.FsStats{
 					{
155 Godeps/_workspace/src/github.com/google/cadvisor/metrics/testdata/prometheus_metrics (generated, vendored)
@@ -1,155 +0,0 @@
-# HELP container_cpu_system_seconds_total Cumulative system cpu time consumed in seconds.
-# TYPE container_cpu_system_seconds_total counter
-container_cpu_system_seconds_total{id="testcontainer",name="testcontainer"} 7e-09
-# HELP container_cpu_usage_seconds_total Cumulative cpu time consumed per cpu in seconds.
-# TYPE container_cpu_usage_seconds_total counter
-container_cpu_usage_seconds_total{cpu="cpu00",id="testcontainer",name="testcontainer"} 2e-09
-container_cpu_usage_seconds_total{cpu="cpu01",id="testcontainer",name="testcontainer"} 3e-09
-container_cpu_usage_seconds_total{cpu="cpu02",id="testcontainer",name="testcontainer"} 4e-09
-container_cpu_usage_seconds_total{cpu="cpu03",id="testcontainer",name="testcontainer"} 5e-09
-# HELP container_cpu_user_seconds_total Cumulative user cpu time consumed in seconds.
-# TYPE container_cpu_user_seconds_total counter
-container_cpu_user_seconds_total{id="testcontainer",name="testcontainer"} 6e-09
-# HELP container_fs_io_current Number of I/Os currently in progress
-# TYPE container_fs_io_current gauge
-container_fs_io_current{device="sda1",id="testcontainer",name="testcontainer"} 42
-container_fs_io_current{device="sda2",id="testcontainer",name="testcontainer"} 47
-# HELP container_fs_io_time_seconds_total Cumulative count of seconds spent doing I/Os
-# TYPE container_fs_io_time_seconds_total counter
-container_fs_io_time_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.3e-08
-container_fs_io_time_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.8e-08
-# HELP container_fs_io_time_weighted_seconds_total Cumulative weighted I/O time in seconds
-# TYPE container_fs_io_time_weighted_seconds_total counter
-container_fs_io_time_weighted_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.4e-08
-container_fs_io_time_weighted_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.9e-08
-# HELP container_fs_limit_bytes Number of bytes that can be consumed by the container on this filesystem.
-# TYPE container_fs_limit_bytes gauge
-container_fs_limit_bytes{device="sda1",id="testcontainer",name="testcontainer"} 22
-container_fs_limit_bytes{device="sda2",id="testcontainer",name="testcontainer"} 37
-# HELP container_fs_read_seconds_total Cumulative count of seconds spent reading
-# TYPE container_fs_read_seconds_total counter
-container_fs_read_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 2.7e-08
-container_fs_read_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.2e-08
-# HELP container_fs_reads_merged_total Cumulative count of reads merged
-# TYPE container_fs_reads_merged_total counter
-container_fs_reads_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 25
-container_fs_reads_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 40
-# HELP container_fs_reads_total Cumulative count of reads completed
-# TYPE container_fs_reads_total counter
-container_fs_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 24
-container_fs_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 39
-# HELP container_fs_sector_reads_total Cumulative count of sector reads completed
-# TYPE container_fs_sector_reads_total counter
-container_fs_sector_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 26
-container_fs_sector_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 41
-# HELP container_fs_sector_writes_total Cumulative count of sector writes completed
-# TYPE container_fs_sector_writes_total counter
-container_fs_sector_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 40
-container_fs_sector_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 45
-# HELP container_fs_usage_bytes Number of bytes that are consumed by the container on this filesystem.
-# TYPE container_fs_usage_bytes gauge
-container_fs_usage_bytes{device="sda1",id="testcontainer",name="testcontainer"} 23
-container_fs_usage_bytes{device="sda2",id="testcontainer",name="testcontainer"} 38
-# HELP container_fs_write_seconds_total Cumulative count of seconds spent writing
-# TYPE container_fs_write_seconds_total counter
-container_fs_write_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.1e-08
-container_fs_write_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.6e-08
-# HELP container_fs_writes_merged_total Cumulative count of writes merged
-# TYPE container_fs_writes_merged_total counter
-container_fs_writes_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 39
-container_fs_writes_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 44
-# HELP container_fs_writes_total Cumulative count of writes completed
-# TYPE container_fs_writes_total counter
-container_fs_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 28
-container_fs_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 43
-# HELP container_last_seen Last time a container was seen by the exporter
-# TYPE container_last_seen gauge
-container_last_seen{id="testcontainer",name="testcontainer"} 1.426203694e+09
-# HELP container_memory_failures_total Cumulative count of memory allocation failures.
-# TYPE container_memory_failures_total counter
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgfault"} 10
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgmajfault"} 11
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgfault"} 12
-container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgmajfault"} 13
-# HELP container_memory_usage_bytes Current memory usage in bytes.
-# TYPE container_memory_usage_bytes gauge
-container_memory_usage_bytes{id="testcontainer",name="testcontainer"} 8
-# HELP container_memory_working_set_bytes Current working set in bytes.
-# TYPE container_memory_working_set_bytes gauge
-container_memory_working_set_bytes{id="testcontainer",name="testcontainer"} 9
-# HELP container_network_receive_bytes_total Cumulative count of bytes received
-# TYPE container_network_receive_bytes_total counter
-container_network_receive_bytes_total{id="testcontainer",name="testcontainer"} 14
-# HELP container_network_receive_errors_total Cumulative count of errors encountered while receiving
-# TYPE container_network_receive_errors_total counter
-container_network_receive_errors_total{id="testcontainer",name="testcontainer"} 16
-# HELP container_network_receive_packets_dropped_total Cumulative count of packets dropped while receiving
-# TYPE container_network_receive_packets_dropped_total counter
-container_network_receive_packets_dropped_total{id="testcontainer",name="testcontainer"} 17
-# HELP container_network_receive_packets_total Cumulative count of packets received
-# TYPE container_network_receive_packets_total counter
-container_network_receive_packets_total{id="testcontainer",name="testcontainer"} 15
-# HELP container_network_transmit_bytes_total Cumulative count of bytes transmitted
-# TYPE container_network_transmit_bytes_total counter
-container_network_transmit_bytes_total{id="testcontainer",name="testcontainer"} 18
-# HELP container_network_transmit_errors_total Cumulative count of errors encountered while transmitting
-# TYPE container_network_transmit_errors_total counter
-container_network_transmit_errors_total{id="testcontainer",name="testcontainer"} 20
-# HELP container_network_transmit_packets_dropped_total Cumulative count of packets dropped while transmitting
-# TYPE container_network_transmit_packets_dropped_total counter
-container_network_transmit_packets_dropped_total{id="testcontainer",name="testcontainer"} 21
-# HELP container_network_transmit_packets_total Cumulative count of packets transmitted
-# TYPE container_network_transmit_packets_total counter
-container_network_transmit_packets_total{id="testcontainer",name="testcontainer"} 19
-# HELP container_scrape_error 1 if there was an error while getting container metrics, 0 otherwise
-# TYPE container_scrape_error gauge
-container_scrape_error 0
-# HELP container_tasks_state Number of tasks in given state
-# TYPE container_tasks_state gauge
-container_tasks_state{id="testcontainer",name="testcontainer",state="iowaiting"} 54
-container_tasks_state{id="testcontainer",name="testcontainer",state="running"} 51
-container_tasks_state{id="testcontainer",name="testcontainer",state="sleeping"} 50
-container_tasks_state{id="testcontainer",name="testcontainer",state="stopped"} 52
-container_tasks_state{id="testcontainer",name="testcontainer",state="uninterruptible"} 53
-# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
-# TYPE http_request_duration_microseconds summary
-http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 0
-http_request_duration_microseconds_sum{handler="prometheus"} 0
-http_request_duration_microseconds_count{handler="prometheus"} 0
-# HELP http_request_size_bytes The HTTP request sizes in bytes.
-# TYPE http_request_size_bytes summary
-http_request_size_bytes{handler="prometheus",quantile="0.5"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.99"} 0
-http_request_size_bytes_sum{handler="prometheus"} 0
-http_request_size_bytes_count{handler="prometheus"} 0
-# HELP http_response_size_bytes The HTTP response sizes in bytes.
-# TYPE http_response_size_bytes summary
-http_response_size_bytes{handler="prometheus",quantile="0.5"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.9"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.99"} 0
-http_response_size_bytes_sum{handler="prometheus"} 0
-http_response_size_bytes_count{handler="prometheus"} 0
-# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
-# TYPE process_cpu_seconds_total counter
-process_cpu_seconds_total 0
-# HELP process_goroutines Number of goroutines that currently exist.
-# TYPE process_goroutines gauge
-process_goroutines 16
-# HELP process_max_fds Maximum number of open file descriptors.
-# TYPE process_max_fds gauge
-process_max_fds 1024
-# HELP process_open_fds Number of open file descriptors.
-# TYPE process_open_fds gauge
-process_open_fds 4
-# HELP process_resident_memory_bytes Resident memory size in bytes.
-# TYPE process_resident_memory_bytes gauge
-process_resident_memory_bytes 7.74144e+06
-# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
-# TYPE process_start_time_seconds gauge
-process_start_time_seconds 1.42620369439e+09
-# HELP process_virtual_memory_bytes Virtual memory size in bytes.
-# TYPE process_virtual_memory_bytes gauge
-process_virtual_memory_bytes 1.16420608e+08
251
Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/testdata/cpuinfo
generated
vendored
@@ -1,251 +0,0 @@
processor : 0
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 0
siblings : 6
core id : 0
cpu cores : 6
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 1
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 0
siblings : 6
core id : 1
cpu cores : 6
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 2
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 0
siblings : 6
core id : 2
cpu cores : 6
apicid : 4
initial apicid : 4
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 3
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 1
siblings : 6
core id : 3
cpu cores : 6
apicid : 16
initial apicid : 16
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 4
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 1
siblings : 6
core id : 4
cpu cores : 6
apicid : 18
initial apicid : 18
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 5
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 1
siblings : 6
core id : 5
cpu cores : 6
apicid : 20
initial apicid : 20
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 6
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 2661.000
cache size : 12288 KB
physical id : 0
siblings : 6
core id : 0
cpu cores : 6
apicid : 1
initial apicid : 1
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 7
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 2661.000
cache size : 12288 KB
physical id : 0
siblings : 6
core id : 1
cpu cores : 6
apicid : 3
initial apicid : 3
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 8
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 0
siblings : 6
core id : 2
cpu cores : 6
apicid : 5
initial apicid : 5
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 9
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 2661.000
cache size : 12288 KB
physical id : 1
siblings : 6
core id : 3
cpu cores : 6
apicid : 17
initial apicid : 17
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

processor : 10
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 1596.000
cache size : 12288 KB
physical id : 1
siblings : 6
core id : 4
cpu cores : 6
apicid : 19
initial apicid : 19
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual
processor : 11
cpu family : 6
stepping : 2
microcode : 0x10
cpu MHz : 2661.000
cache size : 12288 KB
physical id : 1
siblings : 6
core id : 5
cpu cores : 6
apicid : 21
initial apicid : 21
fpu : yes
fpu_exception : yes
cpuid level : 11
wp : yes
bogomips : 5333.60
clflush size : 64
cache_alignment : 64
address sizes : 40 bits physical, 48 bits virtual

@@ -101,6 +101,10 @@ Do you want to help "shape the evolution of technologies that are container pack

You should consider joining the [Cloud Native Computing Foundation](https://cncf.io/about). For details about who's involved and how Kubernetes plays a role, read [their announcement](https://cncf.io/news/announcement/2015/07/new-cloud-native-computing-foundation-drive-alignment-among-container).

### Code of conduct

Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).

#### Are you ready to add to the discussion?

We have presence on:

12
cluster/centos/.gitignore
vendored
Normal file
@@ -0,0 +1,12 @@
binaries

master/bin/etcd
master/bin/etcdctl
master/bin/kube*

node/bin/docker
node/bin/etcd
node/bin/etcdctl
node/bin/flanneld
node/bin/kube*
local-test.sh
135
cluster/centos/build.sh
Executable file
@@ -0,0 +1,135 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Download the flannel, etcd, docker, bridge-utils and K8s binaries automatically
# and store them in the binaries directory.
# Run only as a user with sudo privileges.

# author @kevin-wangzefeng

set -o errexit
set -o nounset
set -o pipefail

readonly ROOT=$(dirname "${BASH_SOURCE}")
source ${ROOT}/config-build.sh

# ensure $RELEASES_DIR is an absolute file path
mkdir -p ${RELEASES_DIR}
RELEASES_DIR=$(cd ${RELEASES_DIR}; pwd)

# get absolute file path of binaries
BINARY_DIR=$(cd ${ROOT}; pwd)/binaries

function clean-up() {
  rm -rf ${RELEASES_DIR}
  rm -rf ${BINARY_DIR}
}

function download-releases() {
  rm -rf ${RELEASES_DIR}
  mkdir -p ${RELEASES_DIR}

  echo "Download flannel release v${FLANNEL_VERSION} ..."
  curl -L ${FLANNEL_DOWNLOAD_URL} -o ${RELEASES_DIR}/flannel.tar.gz

  echo "Download etcd release v${ETCD_VERSION} ..."
  curl -L ${ETCD_DOWNLOAD_URL} -o ${RELEASES_DIR}/etcd.tar.gz

  echo "Download kubernetes release v${K8S_VERSION} ..."
  curl -L ${K8S_DOWNLOAD_URL} -o ${RELEASES_DIR}/kubernetes.tar.gz

  echo "Download docker-latest ..."
  curl -L https://get.docker.com/builds/Linux/x86_64/docker-latest -o ${RELEASES_DIR}/docker
}

function unpack-releases() {
  rm -rf ${BINARY_DIR}
  mkdir -p ${BINARY_DIR}/master/bin
  mkdir -p ${BINARY_DIR}/node/bin

  # flannel
  if [[ -f ${RELEASES_DIR}/flannel.tar.gz ]] ; then
    tar xzf ${RELEASES_DIR}/flannel.tar.gz -C ${RELEASES_DIR}
    cp ${RELEASES_DIR}/flannel-${FLANNEL_VERSION}/flanneld ${BINARY_DIR}/master/bin
    cp ${RELEASES_DIR}/flannel-${FLANNEL_VERSION}/flanneld ${BINARY_DIR}/node/bin
  fi

  # etcd
  if [[ -f ${RELEASES_DIR}/etcd.tar.gz ]] ; then
    tar xzf ${RELEASES_DIR}/etcd.tar.gz -C ${RELEASES_DIR}
    ETCD="etcd-v${ETCD_VERSION}-linux-amd64"
    cp ${RELEASES_DIR}/$ETCD/etcd \
       ${RELEASES_DIR}/$ETCD/etcdctl ${BINARY_DIR}/master/bin
    cp ${RELEASES_DIR}/$ETCD/etcd \
       ${RELEASES_DIR}/$ETCD/etcdctl ${BINARY_DIR}/node/bin
  fi

  # k8s
  if [[ -f ${RELEASES_DIR}/kubernetes.tar.gz ]] ; then
    tar xzf ${RELEASES_DIR}/kubernetes.tar.gz -C ${RELEASES_DIR}

    pushd ${RELEASES_DIR}/kubernetes/server
    tar xzf kubernetes-server-linux-amd64.tar.gz
    popd
    cp ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-apiserver \
       ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-controller-manager \
       ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-scheduler ${BINARY_DIR}/master/bin

    cp ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kubelet \
       ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-proxy ${BINARY_DIR}/node/bin

    cp ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kubectl ${BINARY_DIR}
  fi

  if [[ -f ${RELEASES_DIR}/docker ]]; then
    cp ${RELEASES_DIR}/docker ${BINARY_DIR}/node/bin
  fi

  chmod -R +x ${BINARY_DIR}
  echo "Done! All binaries are stored in ${BINARY_DIR}"
}

function parse-opt() {
  local opt=${1-}

  case $opt in
    download)
      download-releases
      ;;
    unpack)
      unpack-releases
      ;;
    clean)
      clean-up
      ;;
    all)
      download-releases
      unpack-releases
      ;;
    *)
      echo "Usage:"
      echo "  build.sh <command>"
      echo "Commands:"
      echo "  clean     Clean up downloaded releases and unpacked binaries."
      echo "  download  Download releases to \"${RELEASES_DIR}\"."
      echo "  unpack    Unpack releases downloaded in \"${RELEASES_DIR}\", and copy binaries to \"${BINARY_DIR}\"."
      echo "  all       Download releases and unpack them."
      ;;
  esac
}

parse-opt "$@"
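A quick usage sketch for the script above (the subcommands come from parse-opt; the version overrides work because config-build.sh, next in this diff, only assigns defaults with the ${VAR:-...} pattern — the values shown are illustrative):

# Download and unpack everything with the default versions.
./build.sh all

# Or pin specific releases first; these values are examples only.
FLANNEL_VERSION=0.5.3 ETCD_VERSION=2.0.12 K8S_VERSION=1.0.4 ./build.sh all

# The steps can also be run separately.
./build.sh download
./build.sh unpack
./build.sh clean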
38
cluster/centos/config-build.sh
Executable file
@@ -0,0 +1,38 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Contains configuration values for downloading and unpacking the binaries.

# Directory to store release packages that will be downloaded.
RELEASES_DIR=${RELEASES_DIR:-/tmp/downloads}

# Define flannel version to use.
FLANNEL_VERSION=${FLANNEL_VERSION:-"0.5.3"}

# Define etcd version to use.
ETCD_VERSION=${ETCD_VERSION:-"2.0.12"}

# Define k8s version to use.
K8S_VERSION=${K8S_VERSION:-"1.0.4"}

FLANNEL_DOWNLOAD_URL=\
"https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz"

ETCD_DOWNLOAD_URL=\
"https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"

K8S_DOWNLOAD_URL=\
"https://github.com/kubernetes/kubernetes/releases/download/v${K8S_VERSION}/kubernetes.tar.gz"
52
cluster/centos/config-default.sh
Executable file
@@ -0,0 +1,52 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Contains configuration values for the CentOS cluster
# The user should have sudo privileges.
export MASTER=${MASTER:-"centos@172.10.0.11"}
export MASTER_IP=${MASTER#*@}

# Define all of your minion nodes,
# separated by spaces, e.g. <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>.
# Each user should have sudo privileges.
export MINIONS=${MINIONS:-"centos@172.10.0.12 centos@172.10.0.13"}
# It is practically impossible to set an array as an environment variable
# from a script, so assume the variable is a string and convert it to an array.
export MINIONS_ARRAY=($MINIONS)

# Number of nodes in your cluster.
export NUM_MINIONS=${NUM_MINIONS:-2}

# By default, the cluster will use the etcd installed on the master.
export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:4001"}

# Define the IP range used for service cluster IPs.
# Choose a private IP range here, per RFC 1918 (https://tools.ietf.org/html/rfc1918).
export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-"192.168.3.0/24"}

# Define the IP range used for the flannel overlay network; it should not conflict with SERVICE_CLUSTER_IP_RANGE above.
export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"}

# Admission Controllers to invoke prior to persisting objects in the cluster.
export ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny

# Extra options to set on the Docker command line.
# This is useful for setting --insecure-registry for local registries.
export DOCKER_OPTS=${DOCKER_OPTS:-""}


# Timeouts for process checking on master and minion
export PROCESS_CHECK_TIMEOUT=${PROCESS_CHECK_TIMEOUT:-180} # seconds.
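A minimal sketch of overriding these defaults from the environment before bringing a cluster up. The host addresses are placeholders, and the assumption that cluster/kube-up.sh selects this provider via KUBERNETES_PROVIDER=centos follows the usual cluster/kube-*.sh convention referenced in cluster/centos/util.sh later in this diff:

# Hypothetical hosts; each user must have sudo privileges.
export MASTER="centos@192.168.121.10"
export MINIONS="centos@192.168.121.11 centos@192.168.121.12"
export NUM_MINIONS=2

KUBERNETES_PROVIDER=centos ./cluster/kube-up.sh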
102
cluster/centos/master/scripts/apiserver.sh
Executable file
@@ -0,0 +1,102 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


MASTER_ADDRESS=${1:-"8.8.8.18"}
ETCD_SERVERS=${2:-"http://8.8.8.18:4001"}
SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/24"}
ADMISSION_CONTROL=${4:-""}

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"

# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"

# --etcd-servers=[]: List of etcd servers to watch (http://ip:port),
# comma separated. Mutually exclusive with -etcd-config
KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}"

# --address=127.0.0.1: DEPRECATED: see --insecure-bind-address instead
KUBE_API_ADDRESS="--address=${MASTER_ADDRESS}"

# --port=8080: DEPRECATED: see --insecure-port instead
KUBE_API_PORT="--port=8080"

# --kubelet-port=10250: Kubelet port
MINION_PORT="--kubelet-port=10250"

# --allow-privileged=false: If true, allow privileged containers.
KUBE_ALLOW_PRIV="--allow-privileged=false"

# --service-cluster-ip-range=<nil>: A CIDR notation IP range from which to assign service cluster IPs.
# This must not overlap with any IP ranges assigned to nodes for pods.
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"

# --admission-control="AlwaysAdmit": Ordered list of plug-ins
# to do admission control of resources into cluster.
# Comma-delimited list of:
#   LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists,
#   NamespaceLifecycle, NamespaceAutoProvision, DenyExecOnPrivileged,
#   AlwaysAdmit, ServiceAccount, ResourceQuota
#KUBE_ADMISSION_CONTROL="--admission-control=\"${ADMISSION_CONTROL}\""

# --client-ca-file="": If set, any request presenting a client certificate signed
# by one of the authorities in the client-ca-file is authenticated with an identity
# corresponding to the CommonName of the client certificate.
KUBE_API_CLIENT_CA_FILE="--client-ca-file=/srv/kubernetes/ca.crt"

# --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any,
# concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file
# and --tls-private-key-file are not provided, a self-signed certificate and key are
# generated for the public address and saved to /var/run/kubernetes.
KUBE_API_TLS_CERT_FILE="--tls-cert-file=/srv/kubernetes/server.cert"

# --tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
KUBE_API_TLS_PRIVATE_KEY_FILE="--tls-private-key-file=/srv/kubernetes/server.key"
EOF

KUBE_APISERVER_OPTS="   \${KUBE_LOGTOSTDERR}         \\
                        \${KUBE_LOG_LEVEL}           \\
                        \${KUBE_ETCD_SERVERS}        \\
                        \${KUBE_API_ADDRESS}         \\
                        \${KUBE_API_PORT}            \\
                        \${MINION_PORT}              \\
                        \${KUBE_ALLOW_PRIV}          \\
                        \${KUBE_SERVICE_ADDRESSES}   \\
                        \${KUBE_API_CLIENT_CA_FILE}  \\
                        \${KUBE_API_TLS_CERT_FILE}   \\
                        \${KUBE_API_TLS_PRIVATE_KEY_FILE}"


cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS}
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
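For reference, provision-master in cluster/centos/util.sh (later in this diff) runs this script with four positional arguments; a standalone invocation would look like this sketch, using the defaults from config-default.sh above:

sudo bash apiserver.sh 172.10.0.11 "http://172.10.0.11:4001" "192.168.3.0/24" \
  "NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny"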
56
cluster/centos/master/scripts/controller-manager.sh
Executable file
@@ -0,0 +1,56 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


MASTER_ADDRESS=${1:-"8.8.8.18"}

cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=4"
KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"

# --root-ca-file="": If set, this root certificate authority will be included in
# service account's token secret. This must be a valid PEM-encoded CA bundle.
KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt"

# --service-account-private-key-file="": Filename containing a PEM-encoded private
# RSA key used to sign service account tokens.
KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key"
EOF

KUBE_CONTROLLER_MANAGER_OPTS="   \${KUBE_LOGTOSTDERR} \\
                                 \${KUBE_LOG_LEVEL}   \\
                                 \${KUBE_MASTER}      \\
                                 \${KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE} \\
                                 \${KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE}"

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager ${KUBE_CONTROLLER_MANAGER_OPTS}
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
79
cluster/centos/master/scripts/etcd.sh
Executable file
@@ -0,0 +1,79 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## Create etcd.conf, etcd.service, and start the etcd service.


etcd_data_dir=/var/lib/etcd/
mkdir -p ${etcd_data_dir}

cat <<EOF >/opt/kubernetes/cfg/etcd.conf
# [member]
ETCD_NAME=default
ETCD_DATA_DIR="${etcd_data_dir}/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_LISTEN_PEER_URLS="http://localhost:2380,http://localhost:7001"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380,http://localhost:7001"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
#ETCD_INITIAL_CLUSTER="default=http://localhost:2380,default=http://localhost:7001"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379,http://localhost:4001"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#
#[proxy]
#ETCD_PROXY="off"
#
#[security]
#ETCD_CA_FILE=""
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_PEER_CA_FILE=""
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
EOF

cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=simple
WorkingDirectory=${etcd_data_dir}
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=\$(nproc) /opt/kubernetes/bin/etcd"

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
58
cluster/centos/master/scripts/scheduler.sh
Executable file
@@ -0,0 +1,58 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


MASTER_ADDRESS=${1:-"8.8.8.18"}

cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
###
# kubernetes scheduler config

# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"

# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"

KUBE_MASTER="--master=${MASTER_ADDRESS}:8080"

# Add your own!
KUBE_SCHEDULER_ARGS=""

EOF

KUBE_SCHEDULER_OPTS="   \${KUBE_LOGTOSTDERR}     \\
                        \${KUBE_LOG_LEVEL}       \\
                        \${KUBE_MASTER}          \\
                        \${KUBE_SCHEDULER_ARGS}"

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler ${KUBE_SCHEDULER_OPTS}
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
108
cluster/centos/node/bin/mk-docker-opts.sh
Executable file
@@ -0,0 +1,108 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Generate Docker daemon options based on flannel env file.

# exit on any error
set -e

usage() {
  echo "$0 [-f FLANNEL-ENV-FILE] [-d DOCKER-ENV-FILE] [-i] [-c] [-m] [-k COMBINED-KEY]

Generate Docker daemon options based on flannel env file
OPTIONS:
  -f  Path to flannel env file. Defaults to /run/flannel/subnet.env
  -d  Path to Docker env file to write to. Defaults to /run/docker_opts.env
  -i  Output each Docker option as individual var. e.g. DOCKER_OPT_MTU=1500
  -c  Output combined Docker options into DOCKER_OPTS var
  -k  Set the combined options key to this value (default DOCKER_OPTS=)
  -m  Do not output --ip-masq (useful for older Docker version)
" >/dev/stderr
  exit 1
}

flannel_env="/run/flannel/subnet.env"
docker_env="/run/docker_opts.env"
combined_opts_key="DOCKER_OPTS"
indiv_opts=false
combined_opts=false
ipmasq=true

while getopts "f:d:icmk:" opt; do
  case $opt in
    f)
      flannel_env=$OPTARG
      ;;
    d)
      docker_env=$OPTARG
      ;;
    i)
      indiv_opts=true
      ;;
    c)
      combined_opts=true
      ;;
    m)
      ipmasq=false
      ;;
    k)
      combined_opts_key=$OPTARG
      ;;
    \?)
      usage
      ;;
  esac
done

if [[ $indiv_opts = false ]] && [[ $combined_opts = false ]]; then
  indiv_opts=true
  combined_opts=true
fi

if [[ -f "$flannel_env" ]]; then
  source $flannel_env
fi

if [[ -n "$FLANNEL_SUBNET" ]]; then
  DOCKER_OPT_BIP="--bip=$FLANNEL_SUBNET"
fi

if [[ -n "$FLANNEL_MTU" ]]; then
  DOCKER_OPT_MTU="--mtu=$FLANNEL_MTU"
fi

if [[ "$FLANNEL_IPMASQ" = true ]] && [[ $ipmasq = true ]]; then
  DOCKER_OPT_IPMASQ="--ip-masq=false"
fi

eval docker_opts="\$${combined_opts_key}"
docker_opts+=" "

echo -n "" >$docker_env
for opt in $(compgen -v DOCKER_OPT_); do
  eval val=\$$opt

  if [[ "$indiv_opts" = true ]]; then
    echo "$opt=\"$val\"" >>$docker_env
  fi

  docker_opts+="$val "
done

if [[ "$combined_opts" = true ]]; then
  echo "${combined_opts_key}=\"${docker_opts}\"" >>$docker_env
fi

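To make the data flow concrete, a sketch with illustrative values: given a flannel subnet file like the one below, running the script with -i -c would leave /run/docker_opts.env holding both the individual options and the combined DOCKER_OPTS key (the subnet, MTU, and resulting lines are assumptions for the example, traced from the code above):

# /run/flannel/subnet.env, as written by flanneld (example values):
#   FLANNEL_SUBNET=172.16.56.1/24
#   FLANNEL_MTU=1450
#   FLANNEL_IPMASQ=true

./mk-docker-opts.sh -i -c

# /run/docker_opts.env then contains roughly:
#   DOCKER_OPT_BIP="--bip=172.16.56.1/24"
#   DOCKER_OPT_IPMASQ="--ip-masq=false"
#   DOCKER_OPT_MTU="--mtu=1450"
#   DOCKER_OPTS=" --bip=172.16.56.1/24 --ip-masq=false --mtu=1450 "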
27
cluster/centos/node/bin/remove-docker0.sh
Executable file
@@ -0,0 +1,27 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Delete the default docker bridge, so that docker can start with the flannel network.

# exit on any error
set -e

rc=0
ip link show docker0 >/dev/null 2>&1 || rc="$?"
if [[ "$rc" -eq "0" ]]; then
  ip link set dev docker0 down
  ip link delete docker0
fi
49
cluster/centos/node/scripts/docker.sh
Executable file
@@ -0,0 +1,49 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


DOCKER_OPTS=${1:-""}

DOCKER_CONFIG=/opt/kubernetes/cfg/docker

cat <<EOF >$DOCKER_CONFIG
DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -s devicemapper --selinux-enabled=false ${DOCKER_OPTS}"
EOF

cat <<EOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target flannel.service
Requires=flannel.service

[Service]
Type=notify
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/opt/kubernetes/cfg/docker
WorkingDirectory=/opt/kubernetes/bin
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/docker daemon \$DOCKER_OPT_BIP \$DOCKER_OPT_MTU \$DOCKER_OPTS
LimitNOFILE=1048576
LimitNPROC=1048576

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable docker
systemctl start docker
66
cluster/centos/node/scripts/flannel.sh
Executable file
@@ -0,0 +1,66 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


ETCD_SERVERS=${1:-"http://8.8.8.18:4001"}
FLANNEL_NET=${2:-"172.16.0.0/16"}


cat <<EOF >/opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}"
FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network"
EOF

cat <<EOF >/usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStart=/opt/kubernetes/bin/flanneld \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker

Type=notify

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

# Store FLANNEL_NET to etcd.
attempt=0
while true; do
  /opt/kubernetes/bin/etcdctl --no-sync -C ${ETCD_SERVERS} \
    get /coreos.com/network/config >/dev/null 2>&1
  if [[ "$?" == 0 ]]; then
    break
  else
    if (( attempt > 600 )); then
      echo "timed out waiting for network config" > ~/kube/err.log
      exit 2
    fi

    /opt/kubernetes/bin/etcdctl --no-sync -C ${ETCD_SERVERS} \
      mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1
    attempt=$((attempt+1))
    sleep 3
  fi
done
wait

systemctl daemon-reload
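The retry loop above seeds flannel's etcd prefix exactly once per cluster; after it exits, the stored overlay config can be read back with the same etcdctl invocation the script itself uses (default endpoint and FLANNEL_NET shown):

/opt/kubernetes/bin/etcdctl --no-sync -C http://8.8.8.18:4001 \
  get /coreos.com/network/config
# {"Network":"172.16.0.0/16"}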
75
cluster/centos/node/scripts/kubelet.sh
Executable file
@@ -0,0 +1,75 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


MASTER_ADDRESS=${1:-"8.8.8.18"}
NODE_ADDRESS=${2:-"8.8.8.20"}


cat <<EOF >/opt/kubernetes/cfg/kubelet
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"

# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"

# --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)
MINION_ADDRESS="--address=${NODE_ADDRESS}"

# --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag.
MINION_PORT="--port=10250"

# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname.
MINION_HOSTNAME="--hostname-override=${NODE_ADDRESS}"

# --api-servers=[]: List of Kubernetes API servers for publishing events,
# and reading pods and services. (ip:port), comma separated.
KUBELET_API_SERVER="--api-servers=${MASTER_ADDRESS}:8080"

# --allow-privileged=false: If true, allow containers to request privileged mode. [default=false]
KUBE_ALLOW_PRIV="--allow-privileged=false"

# Add your own!
KUBELET_ARGS=""
EOF

KUBELET_OPTS="   \${KUBE_LOGTOSTDERR}     \\
                 \${KUBE_LOG_LEVEL}       \\
                 \${MINION_ADDRESS}       \\
                 \${MINION_PORT}          \\
                 \${MINION_HOSTNAME}      \\
                 \${KUBELET_API_SERVER}   \\
                 \${KUBE_ALLOW_PRIV}      \\
                 \${KUBELET_ARGS}"

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS}
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
51
cluster/centos/node/scripts/proxy.sh
Executable file
@@ -0,0 +1,51 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


MASTER_ADDRESS=${1:-"8.8.8.18"}

cat <<EOF >/opt/kubernetes/cfg/kube-proxy
# --logtostderr=true: log to standard error instead of files
KUBE_LOGTOSTDERR="--logtostderr=true"

# --v=0: log level for V logs
KUBE_LOG_LEVEL="--v=4"

# --master="": The address of the Kubernetes API server (overrides any value in kubeconfig)
KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080"
EOF

KUBE_PROXY_OPTS="   \${KUBE_LOGTOSTDERR} \\
                    \${KUBE_LOG_LEVEL}   \\
                    \${KUBE_MASTER}"

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy ${KUBE_PROXY_OPTS}
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl start kube-proxy
315
cluster/centos/util.sh
Executable file
@@ -0,0 +1,315 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.

# exit on any error
set -e

SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR"

# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE}")
source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}"
source "$KUBE_ROOT/cluster/common.sh"


KUBECTL_PATH=${KUBE_ROOT}/cluster/centos/binaries/kubectl

# Directory to be used for master and minion provisioning.
KUBE_TEMP="~/kube_temp"


# Must ensure that the following ENV vars are set
function detect-master() {
  KUBE_MASTER=$MASTER
  KUBE_MASTER_IP=${MASTER#*@}
  echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
  echo "KUBE_MASTER: ${MASTER}" 1>&2
}

# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
function detect-minions() {
  KUBE_MINION_IP_ADDRESSES=()
  for minion in ${MINIONS}; do
    KUBE_MINION_IP_ADDRESSES+=("${minion#*@}")
  done
  echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2
}

# Verify prereqs on host machine
function verify-prereqs() {
  local rc
  rc=0
  ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
  # "Could not open a connection to your authentication agent."
  if [[ "${rc}" -eq 2 ]]; then
    eval "$(ssh-agent)" > /dev/null
    trap-add "kill ${SSH_AGENT_PID}" EXIT
  fi
  rc=0
  ssh-add -L 1> /dev/null 2> /dev/null || rc="$?"
  # "The agent has no identities."
  if [[ "${rc}" -eq 1 ]]; then
    # Try adding one of the default identities, with or without passphrase.
    ssh-add || true
  fi
  rc=0
  # Expect at least one identity to be available.
  if ! ssh-add -L 1> /dev/null 2> /dev/null; then
    echo "Could not find or add an SSH identity."
    echo "Please start ssh-agent, add your identity, and retry."
    exit 1
  fi
}

# Install handler for signal trap
function trap-add {
  local handler="$1"
  local signal="${2-EXIT}"
  local cur

  cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")"
  if [[ -n "${cur}" ]]; then
    handler="${cur}; ${handler}"
  fi

  trap "${handler}" ${signal}
}

# Validate a kubernetes cluster
function validate-cluster() {
  # by default call the generic validate-cluster.sh script, customizable by
  # any cluster provider if this does not fit.
  "${KUBE_ROOT}/cluster/validate-cluster.sh"
}

# Instantiate a kubernetes cluster
function kube-up() {
  provision-master

  for minion in ${MINIONS}; do
    provision-minion ${minion}
  done

  verify-master
  for minion in ${MINIONS}; do
    verify-minion ${minion}
  done

  detect-master

  # set CONTEXT and KUBE_SERVER values for create-kubeconfig() and get-password()
  export CONTEXT="centos"
  export KUBE_SERVER="http://${KUBE_MASTER_IP}:8080"
  source "${KUBE_ROOT}/cluster/common.sh"

  # set kubernetes user and password
  get-password
  create-kubeconfig
}

# Delete a kubernetes cluster
function kube-down() {
  tear-down-master
  for minion in ${MINIONS}; do
    tear-down-minion ${minion}
  done
}


function verify-master() {
  # verify master has all required daemons
  printf "[INFO] Validating master ${MASTER}"
  local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler")
  local validated="1"
  local try_count=0
  until [[ "$validated" == "0" ]]; do
    validated="0"
    local daemon
    for daemon in "${required_daemon[@]}"; do
      local rc=0
      kube-ssh "${MASTER}" "sudo pgrep -f ${daemon}" >/dev/null 2>&1 || rc="$?"
      if [[ "${rc}" -ne "0" ]]; then
        printf "."
        validated="1"
        ((try_count=try_count+2))
        if [[ ${try_count} -gt ${PROCESS_CHECK_TIMEOUT} ]]; then
          printf "\nWarning: Process \"${daemon}\" failed to run on ${MASTER}, please check.\n"
          exit 1
        fi
        sleep 2
      fi
    done
  done
  printf "\n"

}

function verify-minion() {
  # verify minion has all required daemons
  printf "[INFO] Validating minion ${1}"
  local -a required_daemon=("kube-proxy" "kubelet" "docker")
  local validated="1"
  local try_count=0
  until [[ "$validated" == "0" ]]; do
    validated="0"
    local daemon
    for daemon in "${required_daemon[@]}"; do
      local rc=0
      kube-ssh "${1}" "sudo pgrep -f ${daemon}" >/dev/null 2>&1 || rc="$?"
      if [[ "${rc}" -ne "0" ]]; then
        printf "."
        validated="1"
        ((try_count=try_count+2))
        if [[ ${try_count} -gt ${PROCESS_CHECK_TIMEOUT} ]] ; then
          printf "\nWarning: Process \"${daemon}\" failed to run on ${1}, please check.\n"
          exit 1
        fi
        sleep 2
      fi
    done
  done
  printf "\n"
}

# Clean up on master
function tear-down-master() {
  echo "[INFO] tear-down-master on ${MASTER}"
  for service_name in etcd kube-apiserver kube-controller-manager kube-scheduler ; do
    service_file="/usr/lib/systemd/system/${service_name}.service"
    kube-ssh "$MASTER" " \
      if [[ -f $service_file ]]; then \
        sudo systemctl stop $service_name; \
        sudo systemctl disable $service_name; \
        sudo rm -f $service_file; \
      fi"
  done
  kube-ssh "${MASTER}" "sudo rm -rf /opt/kubernetes"
  kube-ssh "${MASTER}" "sudo rm -rf ${KUBE_TEMP}"
  kube-ssh "${MASTER}" "sudo rm -rf /var/lib/etcd"
}

# Clean up on minion
function tear-down-minion() {
  echo "[INFO] tear-down-minion on $1"
  for service_name in kube-proxy kubelet docker flannel ; do
    service_file="/usr/lib/systemd/system/${service_name}.service"
    kube-ssh "$1" " \
      if [[ -f $service_file ]]; then \
        sudo systemctl stop $service_name; \
        sudo systemctl disable $service_name; \
        sudo rm -f $service_file; \
      fi"
  done
  kube-ssh "$1" "sudo rm -rf /run/flannel"
  kube-ssh "$1" "sudo rm -rf /opt/kubernetes"
  kube-ssh "$1" "sudo rm -rf ${KUBE_TEMP}"
}

# Provision master
#
# Assumed vars:
#   MASTER
#   KUBE_TEMP
#   ETCD_SERVERS
#   SERVICE_CLUSTER_IP_RANGE
function provision-master() {
  echo "[INFO] Provision master on ${MASTER}"
  local master_ip=${MASTER#*@}
  ensure-setup-dir ${MASTER}

  # scp -r ${SSH_OPTS} master config-default.sh copy-files.sh util.sh "${MASTER}:${KUBE_TEMP}"
  kube-scp ${MASTER} "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh ${ROOT}/binaries/master ${ROOT}/master ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}"
  kube-ssh "${MASTER}" " \
    sudo cp -r ${KUBE_TEMP}/master/bin /opt/kubernetes; \
    sudo chmod -R +x /opt/kubernetes/bin; \
    sudo bash ${KUBE_TEMP}/make-ca-cert.sh ${master_ip} IP:${master_ip},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
    sudo bash ${KUBE_TEMP}/master/scripts/etcd.sh; \
    sudo bash ${KUBE_TEMP}/master/scripts/apiserver.sh ${master_ip} ${ETCD_SERVERS} ${SERVICE_CLUSTER_IP_RANGE} ${ADMISSION_CONTROL}; \
    sudo bash ${KUBE_TEMP}/master/scripts/controller-manager.sh ${master_ip}; \
    sudo bash ${KUBE_TEMP}/master/scripts/scheduler.sh ${master_ip}"
}


# Provision minion
#
# Assumed vars:
#   $1 (minion)
#   MASTER
#   KUBE_TEMP
#   ETCD_SERVERS
#   FLANNEL_NET
#   DOCKER_OPTS
function provision-minion() {
  echo "[INFO] Provision minion on $1"
  local master_ip=${MASTER#*@}
  local minion=$1
  local minion_ip=${minion#*@}
  ensure-setup-dir ${minion}

  # scp -r ${SSH_OPTS} minion config-default.sh copy-files.sh util.sh "${minion_ip}:${KUBE_TEMP}"
  kube-scp ${minion} "${ROOT}/binaries/node ${ROOT}/node ${ROOT}/config-default.sh ${ROOT}/util.sh" ${KUBE_TEMP}
  kube-ssh "${minion}" " \
    sudo cp -r ${KUBE_TEMP}/node/bin /opt/kubernetes; \
    sudo chmod -R +x /opt/kubernetes/bin; \
    sudo bash ${KUBE_TEMP}/node/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \
    sudo bash ${KUBE_TEMP}/node/scripts/docker.sh \"${DOCKER_OPTS}\"; \
    sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${master_ip} ${minion_ip}; \
    sudo bash ${KUBE_TEMP}/node/scripts/proxy.sh ${master_ip}"
}

# Create dirs that'll be used during setup on target machine.
#
# Assumed vars:
#   KUBE_TEMP
function ensure-setup-dir() {
  kube-ssh "${1}" "mkdir -p ${KUBE_TEMP}; \
    sudo mkdir -p /opt/kubernetes/bin; \
    sudo mkdir -p /opt/kubernetes/cfg"
}

# Run command over ssh
function kube-ssh() {
  local host="$1"
  shift
  ssh ${SSH_OPTS} -t "${host}" "$@" >/dev/null 2>&1
}

# Copy files recursively over ssh
function kube-scp() {
  local host="$1"
  local src=($2)
  local dst="$3"
  scp -r ${SSH_OPTS} ${src[*]} "${host}:${dst}"
}

# Ensure that we have a password created for authenticating to the master. Will
# read from kubeconfig if available.
#
# Vars set:
#   KUBE_USER
#   KUBE_PASSWORD
function get-password {
  get-kubeconfig-basicauth
  if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then
    KUBE_USER=admin
    KUBE_PASSWORD=$(python -c 'import string,random; \
      print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))')
  fi
}
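One subtlety in util.sh worth a sketch: trap-add composes handlers instead of overwriting them, which is why verify-prereqs can append its ssh-agent cleanup to whatever EXIT trap is already installed (the handlers here are illustrative only):

trap "echo first" EXIT
trap-add "echo second"   # signal defaults to EXIT
trap -p EXIT             # prints: trap -- 'echo first; echo second' EXIT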
@@ -58,7 +58,7 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
|
||||
TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
|
||||
|
||||
KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL"
|
||||
APISERVER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
|
||||
APISERVER_TEST_ARGS="--runtime-config=experimental/v1 ${TEST_CLUSTER_LOG_LEVEL}"
|
||||
CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
|
||||
SCHEDULER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
|
||||
KUBEPROXY_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
|
||||
|
||||
@@ -11,7 +11,7 @@ spec:
|
||||
limits:
|
||||
cpu: 100m
|
||||
args:
|
||||
- -qq
|
||||
- -q
|
||||
volumeMounts:
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
|
||||
@@ -19,6 +19,7 @@ spec:
|
||||
mountPath: /varlog
|
||||
- name: containers
|
||||
mountPath: /var/lib/docker/containers
|
||||
readOnly: true
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: varlog
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
_ "k8s.io/kubernetes/pkg/expapi"
|
||||
_ "k8s.io/kubernetes/pkg/expapi/v1"
|
||||
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
|
||||
"github.com/golang/glog"
|
||||
flag "github.com/spf13/pflag"
|
||||
@@ -84,7 +84,7 @@ func main() {
|
||||
glog.Errorf("error while generating conversion functions for %v: %v", knownType, err)
|
||||
}
|
||||
}
|
||||
generator.RepackImports(util.NewStringSet())
|
||||
generator.RepackImports(sets.NewString())
|
||||
if err := generator.WriteImports(data); err != nil {
|
||||
glog.Fatalf("error while writing imports: %v", err)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
_ "k8s.io/kubernetes/pkg/expapi"
|
||||
_ "k8s.io/kubernetes/pkg/expapi/v1"
|
||||
pkg_runtime "k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
|
||||
"github.com/golang/glog"
|
||||
flag "github.com/spf13/pflag"
|
||||
@@ -80,7 +80,7 @@ func main() {
|
||||
}
|
||||
|
||||
versionPath := path.Join(pkgBase, group, version)
|
||||
generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, util.NewStringSet("k8s.io/kubernetes"))
|
||||
generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, sets.NewString("k8s.io/kubernetes"))
|
||||
generator.AddImport(path.Join(pkgBase, "api"))
|
||||
|
||||
if len(*overwrites) > 0 {
|
||||
|
||||
@@ -55,6 +55,7 @@ import (
	"k8s.io/kubernetes/pkg/master"
	"k8s.io/kubernetes/pkg/tools/etcdtest"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/wait"
	"k8s.io/kubernetes/pkg/volume/empty_dir"
	"k8s.io/kubernetes/plugin/pkg/admission/admit"
@@ -204,7 +205,29 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
	configFilePath := makeTempDirOrDie("config", testRootDir)
	glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
	fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.15"}
	kcfg := kubeletapp.SimpleKubelet(cl, &fakeDocker1, "localhost", testRootDir, firstManifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, configFilePath, nil, kubecontainer.FakeOS{})

	kcfg := kubeletapp.SimpleKubelet(
		cl,
		&fakeDocker1,
		"localhost",
		testRootDir,
		firstManifestURL,
		"127.0.0.1",
		10250, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		configFilePath,
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second /* SyncFrequency */)

	kubeletapp.RunKubelet(kcfg, nil)
	// Kubelet (machine)
	// Create a second kubelet so that the guestbook example's two redis slaves both
@@ -212,7 +235,29 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
	testRootDir = makeTempDirOrDie("kubelet_integ_2.", "")
	glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
	fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.15"}
	kcfg = kubeletapp.SimpleKubelet(cl, &fakeDocker2, "127.0.0.1", testRootDir, secondManifestURL, "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, kubecontainer.FakeOS{})

	kcfg = kubeletapp.SimpleKubelet(
		cl,
		&fakeDocker2,
		"127.0.0.1",
		testRootDir,
		secondManifestURL,
		"127.0.0.1",
		10251, /* KubeletPort */
		0,     /* ReadOnlyPort */
		api.NamespaceDefault,
		empty_dir.ProbeVolumePlugins(),
		nil,
		cadvisorInterface,
		"",
		nil,
		kubecontainer.FakeOS{},
		1*time.Second,  /* FileCheckFrequency */
		1*time.Second,  /* HTTPCheckFrequency */
		10*time.Second, /* MinimumGCAge */
		3*time.Second,  /* NodeStatusUpdateFrequency */
		10*time.Second /* SyncFrequency */)

	kubeletapp.RunKubelet(kcfg, nil)
	return apiServer.URL, configFilePath
}
@@ -694,7 +739,7 @@ func runMasterServiceTest(client *client.Client) {
		glog.Fatalf("unexpected error listing services: %v", err)
	}
	var foundRW bool
	found := util.StringSet{}
	found := sets.String{}
	for i := range svcList.Items {
		found.Insert(svcList.Items[i].Name)
		if svcList.Items[i].Name == "kubernetes" {
@@ -820,7 +865,7 @@ func runServiceTest(client *client.Client) {
	if err != nil {
		glog.Fatalf("Failed to list services across namespaces: %v", err)
	}
	names := util.NewStringSet()
	names := sets.NewString()
	for _, svc := range svcList.Items {
		names.Insert(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name))
	}
@@ -967,7 +1012,7 @@ func main() {
	// Check that kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := util.StringSet{}
	createdConts := sets.String{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {

@@ -57,7 +57,7 @@ const (
	// Set to a value larger than the timeouts in each watch server.
	ReadWriteTimeout = time.Minute * 60
	//TODO: This can be tightened up. It still matches objects named watch or proxy.
	defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs|portforward|exec)/?$)"
	defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)"
)

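The hunk above widens the long-running request pattern to also match `attach` and the plural `logs` suffix. A quick standalone sketch (not part of the commit) shows which request paths the new expression classifies as long-running:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The updated pattern from the hunk above.
	re := regexp.MustCompile("(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)")

	for _, p := range []string{
		"/api/v1/namespaces/myns/pods/mypod/attach", // newly matched
		"/api/v1/namespaces/myns/pods/mypod/logs/",  // newly matched via "logs?"
		"/api/v1/watch/namespaces/myns/pods",        // matched before and after
		"/api/v1/namespaces/myns/pods/mypod",        // an ordinary, short-lived request
	} {
		fmt.Printf("%-45s long-running=%v\n", p, re.MatchString(p))
	}
}
```
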
// APIServer runs a kubernetes api server.
@@ -499,23 +499,24 @@ func (s *APIServer) Run(_ []string) error {
	}

	glog.Infof("Serving securely on %s", secureLocation)
	if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
		s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt")
		s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key")
		// TODO (cjcullen): Is PublicAddress the right address to sign a cert with?
		alternateIPs := []net.IP{config.ServiceReadWriteIP}
		alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}
		// It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless
		// alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME")
		if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil {
			glog.Errorf("Unable to generate self signed cert: %v", err)
		} else {
			glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
		}
	}

	go func() {
		defer util.HandleCrash()
		for {
			if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
				s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt")
				s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key")
				// TODO (cjcullen): Is PublicAddress the right address to sign a cert with?
				alternateIPs := []net.IP{config.ServiceReadWriteIP}
				alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}
				// It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless
				// alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME")
				if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil {
					glog.Errorf("Unable to generate self signed cert: %v", err)
				} else {
					glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
				}
			}
			// err == systemd.SdNotifyNoSocket when not running on a systemd system
			if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket {
				glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err)

@@ -38,12 +38,16 @@ func TestLongRunningRequestRegexp(t *testing.T) {
		"/api/v1/watch/stuff",
		"/api/v1/default/service/proxy",
		"/api/v1/pods/proxy/path/to/thing",
		"/api/v1/namespaces/myns/pods/mypod/log",
		"/api/v1/namespaces/myns/pods/mypod/logs",
		"/api/v1/namespaces/myns/pods/mypod/portforward",
		"/api/v1/namespaces/myns/pods/mypod/exec",
		"/api/v1/namespaces/myns/pods/mypod/attach",
		"/api/v1/namespaces/myns/pods/mypod/log/",
		"/api/v1/namespaces/myns/pods/mypod/logs/",
		"/api/v1/namespaces/myns/pods/mypod/portforward/",
		"/api/v1/namespaces/myns/pods/mypod/exec/",
		"/api/v1/namespaces/myns/pods/mypod/attach/",
		"/api/v1/watch/namespaces/myns/pods",
	}
	for _, path := range dontMatch {

@@ -242,9 +242,16 @@ func (s *CMServer) Run(_ []string) error {
	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)

	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, s.NamespaceSyncPeriod)
	// An OR of all flags to enable/disable experimental features
	experimentalMode := s.EnableHorizontalPodAutoscaler
	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, experimentalMode, s.NamespaceSyncPeriod)
	namespaceController.Run()

	if s.EnableHorizontalPodAutoscaler {
		horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient))
		horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod)
	}

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
@@ -287,15 +294,5 @@ func (s *CMServer) Run(_ []string) error {
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()

	if s.EnableHorizontalPodAutoscaler {
		expClient, err := client.NewExperimental(kubeconfig)
		if err != nil {
			glog.Fatalf("Invalid API configuration: %v", err)
		}
		horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, expClient,
			metrics.NewHeapsterMetricsClient(kubeClient))
		horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod)
	}

	select {}
}

@@ -165,12 +165,17 @@ func (s *ProxyServer) Run(_ []string) error {
	var proxier proxy.ProxyProvider
	var endpointsHandler config.EndpointsConfigHandler

	// guaranteed false on error, error only necessary for debugging
	shouldUseIptables, err := iptables.ShouldUseIptablesProxier()
	if err != nil {
		glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err)
	shouldUseIptables := false
	if !s.ForceUserspaceProxy {
		var err error
		// guaranteed false on error, error only necessary for debugging
		shouldUseIptables, err = iptables.ShouldUseIptablesProxier()
		if err != nil {
			glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err)
		}
	}
	if !s.ForceUserspaceProxy && shouldUseIptables {

	if shouldUseIptables {
		glog.V(2).Info("Using iptables Proxier.")

		execer := exec.New()

@@ -66,73 +66,75 @@ const defaultRootDir = "/var/lib/kubelet"
// KubeletServer encapsulates all of the parameters necessary for starting up
// a kubelet. These can either be set via command line or directly.
type KubeletServer struct {
	Config string
	SyncFrequency time.Duration
	FileCheckFrequency time.Duration
	HTTPCheckFrequency time.Duration
	ManifestURL string
	ManifestURLHeader string
	EnableServer bool
	Address net.IP
	Port uint
	ReadOnlyPort uint
	HostnameOverride string
	PodInfraContainerImage string
	DockerEndpoint string
	RootDirectory string
	AllowPrivileged bool
	HostNetworkSources string
	RegistryPullQPS float64
	RegistryBurst int
	RunOnce bool
	EnableDebuggingHandlers bool
	MinimumGCAge time.Duration
	MaxPerPodContainerCount int
	MaxContainerCount int
	AuthPath util.StringFlag // Deprecated -- use KubeConfig instead
	KubeConfig util.StringFlag
	CadvisorPort uint
	HealthzPort int
	HealthzBindAddress net.IP
	OOMScoreAdj int
	APIServerList []string
	RegisterNode bool
	StandaloneMode bool
	ClusterDomain string
	MasterServiceNamespace string
	AuthPath util.StringFlag // Deprecated -- use KubeConfig instead
	CadvisorPort uint
	CertDirectory string
	CgroupRoot string
	CloudConfigFile string
	CloudProvider string
	ClusterDNS net.IP
	StreamingConnectionIdleTimeout time.Duration
	ClusterDomain string
	Config string
	ConfigureCBR0 bool
	ContainerRuntime string
	CPUCFSQuota bool
	DockerDaemonContainer string
	DockerEndpoint string
	DockerExecHandlerName string
	EnableDebuggingHandlers bool
	EnableServer bool
	EventBurst int
	EventRecordQPS float32
	FileCheckFrequency time.Duration
	HealthzBindAddress net.IP
	HealthzPort int
	HostnameOverride string
	HostNetworkSources string
	HTTPCheckFrequency time.Duration
	ImageGCHighThresholdPercent int
	ImageGCLowThresholdPercent int
	KubeConfig util.StringFlag
	LowDiskSpaceThresholdMB int
	NetworkPluginName string
	ManifestURL string
	ManifestURLHeader string
	MasterServiceNamespace string
	MaxContainerCount int
	MaxPerPodContainerCount int
	MaxPods int
	MinimumGCAge time.Duration
	NetworkPluginDir string
	CloudProvider string
	CloudConfigFile string
	NetworkPluginName string
	NodeStatusUpdateFrequency time.Duration
	OOMScoreAdj int
	PodCIDR string
	PodInfraContainerImage string
	Port uint
	ReadOnlyPort uint
	RegisterNode bool
	RegistryBurst int
	RegistryPullQPS float64
	ResolverConfig string
	ResourceContainer string
	RktPath string
	RootDirectory string
	RunOnce bool
	StandaloneMode bool
	StreamingConnectionIdleTimeout time.Duration
	SyncFrequency time.Duration
	SystemContainer string
	TLSCertFile string
	TLSPrivateKeyFile string
	CertDirectory string
	NodeStatusUpdateFrequency time.Duration
	ResourceContainer string
	CgroupRoot string
	ContainerRuntime string
	RktPath string
	DockerDaemonContainer string
	SystemContainer string
	ConfigureCBR0 bool
	PodCIDR string
	MaxPods int
	DockerExecHandlerName string
	ResolverConfig string
	CPUCFSQuota bool
	// Flags intended for testing

	// Crash immediately, rather than eating panics.
	ReallyCrashForTesting bool
	// Insert a probability of random errors during calls to the master.
	ChaosChance float64
	// Flags intended for testing
	// Is the kubelet containerized?
	Containerized bool
	// Insert a probability of random errors during calls to the master.
	ChaosChance float64
	// Crash immediately, rather than eating panics.
	ReallyCrashForTesting bool
}

// bootstrapping interface for kubelet, targets the initialization protocol
@@ -151,45 +153,45 @@ type KubeletBuilder func(kc *KubeletConfig) (KubeletBootstrap, *config.PodConfig
// NewKubeletServer will create a new KubeletServer with default values.
func NewKubeletServer() *KubeletServer {
	return &KubeletServer{
		SyncFrequency: 10 * time.Second,
		FileCheckFrequency: 20 * time.Second,
		HTTPCheckFrequency: 20 * time.Second,
		EnableServer: true,
		Address: net.ParseIP("0.0.0.0"),
		Port: ports.KubeletPort,
		ReadOnlyPort: ports.KubeletReadOnlyPort,
		PodInfraContainerImage: dockertools.PodInfraContainerImage,
		RootDirectory: defaultRootDir,
		RegistryBurst: 10,
		EnableDebuggingHandlers: true,
		MinimumGCAge: 1 * time.Minute,
		MaxPerPodContainerCount: 2,
		MaxContainerCount: 100,
		AuthPath: util.NewStringFlag("/var/lib/kubelet/kubernetes_auth"), // deprecated
		KubeConfig: util.NewStringFlag("/var/lib/kubelet/kubeconfig"),
		CadvisorPort: 4194,
		HealthzPort: 10248,
		CertDirectory: "/var/run/kubernetes",
		CgroupRoot: "",
		ConfigureCBR0: false,
		ContainerRuntime: "docker",
		CPUCFSQuota: false,
		DockerDaemonContainer: "/docker-daemon",
		DockerExecHandlerName: "native",
		EnableDebuggingHandlers: true,
		EnableServer: true,
		FileCheckFrequency: 20 * time.Second,
		HealthzBindAddress: net.ParseIP("127.0.0.1"),
		RegisterNode: true, // will be ignored if no apiserver is configured
		OOMScoreAdj: qos.KubeletOomScoreAdj,
		MasterServiceNamespace: api.NamespaceDefault,
		HealthzPort: 10248,
		HostNetworkSources: kubelet.FileSource,
		HTTPCheckFrequency: 20 * time.Second,
		ImageGCHighThresholdPercent: 90,
		ImageGCLowThresholdPercent: 80,
		KubeConfig: util.NewStringFlag("/var/lib/kubelet/kubeconfig"),
		LowDiskSpaceThresholdMB: 256,
		NetworkPluginName: "",
		MasterServiceNamespace: api.NamespaceDefault,
		MaxContainerCount: 100,
		MaxPerPodContainerCount: 2,
		MinimumGCAge: 1 * time.Minute,
		NetworkPluginDir: "/usr/libexec/kubernetes/kubelet-plugins/net/exec/",
		HostNetworkSources: kubelet.FileSource,
		CertDirectory: "/var/run/kubernetes",
		NetworkPluginName: "",
		NodeStatusUpdateFrequency: 10 * time.Second,
		ResourceContainer: "/kubelet",
		CgroupRoot: "",
		ContainerRuntime: "docker",
		RktPath: "",
		DockerDaemonContainer: "/docker-daemon",
		SystemContainer: "",
		ConfigureCBR0: false,
		DockerExecHandlerName: "native",
		CPUCFSQuota: false,
		OOMScoreAdj: qos.KubeletOomScoreAdj,
		PodInfraContainerImage: dockertools.PodInfraContainerImage,
		Port: ports.KubeletPort,
		ReadOnlyPort: ports.KubeletReadOnlyPort,
		RegisterNode: true, // will be ignored if no apiserver is configured
		RegistryBurst: 10,
		ResourceContainer: "/kubelet",
		RktPath: "",
		RootDirectory: defaultRootDir,
		SyncFrequency: 10 * time.Second,
		SystemContainer: "",
	}
}

@@ -220,6 +222,8 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
	fs.StringVar(&s.HostNetworkSources, "host-network-sources", s.HostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network. For all sources use \"*\" [default=\"file\"]")
	fs.Float64Var(&s.RegistryPullQPS, "registry-qps", s.RegistryPullQPS, "If > 0, limit registry pull QPS to this value. If 0, unlimited. [default=0.0]")
	fs.IntVar(&s.RegistryBurst, "registry-burst", s.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0")
	fs.Float32Var(&s.EventRecordQPS, "event-qps", s.EventRecordQPS, "If > 0, limit event creations per second to this value. If 0, unlimited. [default=0.0]")
	fs.IntVar(&s.EventBurst, "event-burst", s.EventBurst, "Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0")
	fs.BoolVar(&s.RunOnce, "runonce", s.RunOnce, "If true, exit after spawning pods from local manifests or remote urls. Exclusive with --api-servers, and --enable-server")
	fs.BoolVar(&s.EnableDebuggingHandlers, "enable-debugging-handlers", s.EnableDebuggingHandlers, "Enables server endpoints for log collection and local running of containers and commands")
	fs.DurationVar(&s.MinimumGCAge, "minimum-container-ttl-duration", s.MinimumGCAge, "Minimum age for a finished container before it is garbage collected. Examples: '300ms', '10s' or '2h45m'")
@@ -313,58 +317,61 @@ func (s *KubeletServer) KubeletConfig() (*KubeletConfig, error) {
	}

	return &KubeletConfig{
		Address: s.Address,
		AllowPrivileged: s.AllowPrivileged,
		HostNetworkSources: hostNetworkSources,
		HostnameOverride: s.HostnameOverride,
		RootDirectory: s.RootDirectory,
		ConfigFile: s.Config,
		ManifestURL: s.ManifestURL,
		ManifestURLHeader: manifestURLHeader,
		FileCheckFrequency: s.FileCheckFrequency,
		HTTPCheckFrequency: s.HTTPCheckFrequency,
		PodInfraContainerImage: s.PodInfraContainerImage,
		SyncFrequency: s.SyncFrequency,
		RegistryPullQPS: s.RegistryPullQPS,
		RegistryBurst: s.RegistryBurst,
		MinimumGCAge: s.MinimumGCAge,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxContainerCount: s.MaxContainerCount,
		RegisterNode: s.RegisterNode,
		StandaloneMode: (len(s.APIServerList) == 0),
		ClusterDomain: s.ClusterDomain,
		ClusterDNS: s.ClusterDNS,
		Runonce: s.RunOnce,
		Address: s.Address,
		AllowPrivileged: s.AllowPrivileged,
		CadvisorInterface: nil, // launches background processes, not set here
		CgroupRoot: s.CgroupRoot,
		Cloud: nil, // cloud provider might start background processes
		ClusterDNS: s.ClusterDNS,
		ClusterDomain: s.ClusterDomain,
		ConfigFile: s.Config,
		ConfigureCBR0: s.ConfigureCBR0,
		ContainerRuntime: s.ContainerRuntime,
		CPUCFSQuota: s.CPUCFSQuota,
		DiskSpacePolicy: diskSpacePolicy,
		DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		DockerDaemonContainer: s.DockerDaemonContainer,
		DockerExecHandler: dockerExecHandler,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		EnableServer: s.EnableServer,
		EventBurst: s.EventBurst,
		EventRecordQPS: s.EventRecordQPS,
		FileCheckFrequency: s.FileCheckFrequency,
		HostnameOverride: s.HostnameOverride,
		HostNetworkSources: hostNetworkSources,
		HTTPCheckFrequency: s.HTTPCheckFrequency,
		ImageGCPolicy: imageGCPolicy,
		KubeClient: nil,
		ManifestURL: s.ManifestURL,
		ManifestURLHeader: manifestURLHeader,
		MasterServiceNamespace: s.MasterServiceNamespace,
		MaxContainerCount: s.MaxContainerCount,
		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
		MaxPods: s.MaxPods,
		MinimumGCAge: s.MinimumGCAge,
		Mounter: mounter,
		NetworkPluginName: s.NetworkPluginName,
		NetworkPlugins: ProbeNetworkPlugins(s.NetworkPluginDir),
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		OSInterface: kubecontainer.RealOS{},
		PodCIDR: s.PodCIDR,
		PodInfraContainerImage: s.PodInfraContainerImage,
		Port: s.Port,
		ReadOnlyPort: s.ReadOnlyPort,
		CadvisorInterface: nil, // launches background processes, not set here
		EnableServer: s.EnableServer,
		EnableDebuggingHandlers: s.EnableDebuggingHandlers,
		DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
		KubeClient: nil,
		MasterServiceNamespace: s.MasterServiceNamespace,
		VolumePlugins: ProbeVolumePlugins(),
		NetworkPlugins: ProbeNetworkPlugins(s.NetworkPluginDir),
		NetworkPluginName: s.NetworkPluginName,
		RegisterNode: s.RegisterNode,
		RegistryBurst: s.RegistryBurst,
		RegistryPullQPS: s.RegistryPullQPS,
		ResolverConfig: s.ResolverConfig,
		ResourceContainer: s.ResourceContainer,
		RktPath: s.RktPath,
		RootDirectory: s.RootDirectory,
		Runonce: s.RunOnce,
		StandaloneMode: (len(s.APIServerList) == 0),
		StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
		SyncFrequency: s.SyncFrequency,
		SystemContainer: s.SystemContainer,
		TLSOptions: tlsOptions,
		ImageGCPolicy: imageGCPolicy,
		DiskSpacePolicy: diskSpacePolicy,
		Cloud: nil, // cloud provider might start background processes
		NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
		ResourceContainer: s.ResourceContainer,
		CgroupRoot: s.CgroupRoot,
		ContainerRuntime: s.ContainerRuntime,
		RktPath: s.RktPath,
		Mounter: mounter,
		DockerDaemonContainer: s.DockerDaemonContainer,
		SystemContainer: s.SystemContainer,
		ConfigureCBR0: s.ConfigureCBR0,
		PodCIDR: s.PodCIDR,
		MaxPods: s.MaxPods,
		DockerExecHandler: dockerExecHandler,
		ResolverConfig: s.ResolverConfig,
		CPUCFSQuota: s.CPUCFSQuota,
		VolumePlugins: ProbeVolumePlugins(),
	}, nil
}

@@ -554,13 +561,15 @@ func SimpleKubelet(client *client.Client,
	dockerClient dockertools.DockerInterface,
	hostname, rootDir, manifestURL, address string,
	port uint,
	readOnlyPort uint,
	masterServiceNamespace string,
	volumePlugins []volume.VolumePlugin,
	tlsOptions *kubelet.TLSOptions,
	cadvisorInterface cadvisor.Interface,
	configFilePath string,
	cloud cloudprovider.Interface,
	osInterface kubecontainer.OSInterface) *KubeletConfig {
	osInterface kubecontainer.OSInterface,
	fileCheckFrequency, httpCheckFrequency, minimumGCAge, nodeStatusUpdateFrequency, syncFrequency time.Duration) *KubeletConfig {

	imageGCPolicy := kubelet.ImageGCPolicy{
		HighThresholdPercent: 90,
@@ -571,43 +580,44 @@ func SimpleKubelet(client *client.Client,
		RootFreeDiskMB: 256,
	}
	kcfg := KubeletConfig{
		KubeClient: client,
		DockerClient: dockerClient,
		HostnameOverride: hostname,
		RootDirectory: rootDir,
		ManifestURL: manifestURL,
		PodInfraContainerImage: dockertools.PodInfraContainerImage,
		Port: port,
		Address: net.ParseIP(address),
		EnableServer: true,
		EnableDebuggingHandlers: true,
		HTTPCheckFrequency: 1 * time.Second,
		FileCheckFrequency: 1 * time.Second,
		SyncFrequency: 3 * time.Second,
		MinimumGCAge: 10 * time.Second,
		MaxPerPodContainerCount: 2,
		MaxContainerCount: 100,
		RegisterNode: true,
		MasterServiceNamespace: masterServiceNamespace,
		VolumePlugins: volumePlugins,
		TLSOptions: tlsOptions,
		CadvisorInterface: cadvisorInterface,
		ConfigFile: configFilePath,
		ImageGCPolicy: imageGCPolicy,
		DiskSpacePolicy: diskSpacePolicy,
		Cloud: cloud,
		NodeStatusUpdateFrequency: 10 * time.Second,
		ResourceContainer: "/kubelet",
		OSInterface: osInterface,
		Address: net.ParseIP(address),
		CadvisorInterface: cadvisorInterface,
		CgroupRoot: "",
		Cloud: cloud,
		ConfigFile: configFilePath,
		ContainerRuntime: "docker",
		Mounter: mount.New(),
		DockerDaemonContainer: "/docker-daemon",
		SystemContainer: "",
		MaxPods: 32,
		DockerExecHandler: &dockertools.NativeExecHandler{},
		ResolverConfig: kubelet.ResolvConfDefault,
		CPUCFSQuota: false,
		DiskSpacePolicy: diskSpacePolicy,
		DockerClient: dockerClient,
		DockerDaemonContainer: "/docker-daemon",
		DockerExecHandler: &dockertools.NativeExecHandler{},
		EnableDebuggingHandlers: true,
		EnableServer: true,
		FileCheckFrequency: fileCheckFrequency,
		HostnameOverride: hostname,
		HTTPCheckFrequency: httpCheckFrequency,
		ImageGCPolicy: imageGCPolicy,
		KubeClient: client,
		ManifestURL: manifestURL,
		MasterServiceNamespace: masterServiceNamespace,
		MaxContainerCount: 100,
		MaxPerPodContainerCount: 2,
		MaxPods: 32,
		MinimumGCAge: minimumGCAge,
		Mounter: mount.New(),
		NodeStatusUpdateFrequency: nodeStatusUpdateFrequency,
		OSInterface: osInterface,
		PodInfraContainerImage: dockertools.PodInfraContainerImage,
		Port: port,
		ReadOnlyPort: readOnlyPort,
		RegisterNode: true,
		ResolverConfig: kubelet.ResolvConfDefault,
		ResourceContainer: "/kubelet",
		RootDirectory: rootDir,
		SyncFrequency: syncFrequency,
		SystemContainer: "",
		TLSOptions: tlsOptions,
		VolumePlugins: volumePlugins,
	}
	return &kcfg
}
@@ -646,7 +656,13 @@ func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
	eventBroadcaster.StartLogging(glog.V(3).Infof)
	if kcfg.KubeClient != nil {
		glog.V(4).Infof("Sending events to api server.")
		eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
		if kcfg.EventRecordQPS == 0.0 {
			eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events(""))
		} else {
			eventClient := *kcfg.KubeClient
			eventClient.Throttle = util.NewTokenBucketRateLimiter(kcfg.EventRecordQPS, kcfg.EventBurst)
			eventBroadcaster.StartRecordingToSink(eventClient.Events(""))
		}
	} else {
		glog.Warning("No api server defined - no events will be sent to API server.")
	}
@@ -723,62 +739,64 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig {
// KubeletConfig is all of the parameters necessary for running a kubelet.
// TODO: This should probably be merged with KubeletServer. The extra object is a consequence of refactoring.
type KubeletConfig struct {
	KubeClient *client.Client
	DockerClient dockertools.DockerInterface
	CadvisorInterface cadvisor.Interface
	Address net.IP
	AllowPrivileged bool
	HostNetworkSources []string
	HostnameOverride string
	RootDirectory string
	CadvisorInterface cadvisor.Interface
	CgroupRoot string
	Cloud cloudprovider.Interface
	ClusterDNS net.IP
	ClusterDomain string
	ConfigFile string
	ConfigureCBR0 bool
	ContainerRuntime string
	CPUCFSQuota bool
	DiskSpacePolicy kubelet.DiskSpacePolicy
	DockerClient dockertools.DockerInterface
	DockerDaemonContainer string
	DockerExecHandler dockertools.ExecHandler
	EnableDebuggingHandlers bool
	EnableServer bool
	EventBurst int
	EventRecordQPS float32
	FileCheckFrequency time.Duration
	Hostname string
	HostnameOverride string
	HostNetworkSources []string
	HTTPCheckFrequency time.Duration
	ImageGCPolicy kubelet.ImageGCPolicy
	KubeClient *client.Client
	ManifestURL string
	ManifestURLHeader http.Header
	FileCheckFrequency time.Duration
	HTTPCheckFrequency time.Duration
	Hostname string
	NodeName string
	PodInfraContainerImage string
	SyncFrequency time.Duration
	RegistryPullQPS float64
	RegistryBurst int
	MinimumGCAge time.Duration
	MaxPerPodContainerCount int
	MasterServiceNamespace string
	MaxContainerCount int
	RegisterNode bool
	StandaloneMode bool
	ClusterDomain string
	ClusterDNS net.IP
	EnableServer bool
	EnableDebuggingHandlers bool
	MaxPerPodContainerCount int
	MaxPods int
	MinimumGCAge time.Duration
	Mounter mount.Interface
	NetworkPluginName string
	NetworkPlugins []network.NetworkPlugin
	NodeName string
	NodeStatusUpdateFrequency time.Duration
	OSInterface kubecontainer.OSInterface
	PodCIDR string
	PodInfraContainerImage string
	Port uint
	ReadOnlyPort uint
	Runonce bool
	MasterServiceNamespace string
	VolumePlugins []volume.VolumePlugin
	NetworkPlugins []network.NetworkPlugin
	NetworkPluginName string
	StreamingConnectionIdleTimeout time.Duration
	Recorder record.EventRecorder
	TLSOptions *kubelet.TLSOptions
	ImageGCPolicy kubelet.ImageGCPolicy
	DiskSpacePolicy kubelet.DiskSpacePolicy
	Cloud cloudprovider.Interface
	NodeStatusUpdateFrequency time.Duration
	ResourceContainer string
	OSInterface kubecontainer.OSInterface
	CgroupRoot string
	ContainerRuntime string
	RktPath string
	Mounter mount.Interface
	DockerDaemonContainer string
	SystemContainer string
	ConfigureCBR0 bool
	PodCIDR string
	MaxPods int
	DockerExecHandler dockertools.ExecHandler
	RegisterNode bool
	RegistryBurst int
	RegistryPullQPS float64
	ResolverConfig string
	CPUCFSQuota bool
	ResourceContainer string
	RktPath string
	RootDirectory string
	Runonce bool
	StandaloneMode bool
	StreamingConnectionIdleTimeout time.Duration
	SyncFrequency time.Duration
	SystemContainer string
	TLSOptions *kubelet.TLSOptions
	VolumePlugins []volume.VolumePlugin
}

func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -809,6 +827,8 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
		kc.SyncFrequency,
		float32(kc.RegistryPullQPS),
		kc.RegistryBurst,
		kc.EventRecordQPS,
		kc.EventBurst,
		gcPolicy,
		pc.SeenAllSources,
		kc.RegisterNode,

@@ -43,7 +43,7 @@ var exampleMungeTagRE = regexp.MustCompile(beginMungeTag(fmt.Sprintf("%s %s", ex
// bar:
// ```
//
// [Download example](../../examples/guestbook/frontend-controller.yaml)
// [Download example](../../examples/guestbook/frontend-controller.yaml?raw=true)
// <!-- END MUNGE: EXAMPLE -->
func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) {
	var err error
@@ -108,7 +108,7 @@ func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) {

	// remove leading and trailing spaces and newlines
	trimmedFileContent := strings.TrimSpace(string(dat))
	content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel)
	content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s?raw=true)", fileType, trimmedFileContent, fileRel)
	out := getMungeLines(content)
	return out, nil
}

@@ -41,11 +41,11 @@ spec:
		{"", ""},
		{
			"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
			"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
			"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml?raw=true)\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
		},
		{
			"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
			"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
			"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml?raw=true)\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
		},
	}
	repoRoot = ""

59
code-of-conduct.md
Normal file
@@ -0,0 +1,59 @@
## Kubernetes Community Code of Conduct

### Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses,
  without explicit permission
* Other unethical or unprofessional conduct.

Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
opening an issue or contacting one or more of the project maintainers.

This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/

### Kubernetes Events Code of Conduct

Kubernetes events are working conferences intended for professional networking and collaboration in the
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
with their employer's policies on appropriate workplace behavior.

While at Kubernetes events or related social networking opportunities, attendees should not engage in
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
be especially aware of these concerns.

The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
be engaging in discriminatory or offensive speech or actions.

Please bring any concerns to the immediate attention of Kubernetes event staff.

@@ -144,7 +144,7 @@ func (s *CMServer) Run(_ []string) error {
	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)

	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, s.NamespaceSyncPeriod)
	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, false, s.NamespaceSyncPeriod)
	namespaceController.Run()

	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)

@@ -301,6 +301,8 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
		kc.SyncFrequency,
		float32(kc.RegistryPullQPS),
		kc.RegistryBurst,
		kc.EventRecordQPS,
		kc.EventBurst,
		gcPolicy,
		pc.SeenAllSources,
		kc.RegisterNode,

@@ -30,7 +30,7 @@ import (
	"k8s.io/kubernetes/contrib/mesos/pkg/queue"
	"k8s.io/kubernetes/contrib/mesos/pkg/runtime"
	"k8s.io/kubernetes/pkg/client/unversioned/cache"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
)

const (
@@ -453,7 +453,7 @@ func (s *offerStorage) nextListener() *offerListener {
// notify listeners if we find an acceptable offer for them. listeners
// are garbage collected after a certain age (see offerListenerMaxAge).
// ids lists offer IDs that are retrievable from offer storage.
func (s *offerStorage) notifyListeners(ids func() (util.StringSet, uint64)) {
func (s *offerStorage) notifyListeners(ids func() (sets.String, uint64)) {
	listener := s.nextListener() // blocking

	offerIds, version := ids()
@@ -493,8 +493,8 @@ func (s *offerStorage) Init(done <-chan struct{}) {

	// cached offer ids for the purposes of listener notification
	idCache := &stringsCache{
		refill: func() util.StringSet {
			result := util.NewStringSet()
		refill: func() sets.String {
			result := sets.NewString()
			for _, v := range s.offers.List() {
				if offer, ok := v.(Perishable); ok {
					result.Insert(offer.Id())
@@ -510,14 +510,14 @@ func (s *offerStorage) Init(done <-chan struct{}) {

type stringsCache struct {
	expiresAt time.Time
	cached util.StringSet
	cached sets.String
	ttl time.Duration
	refill func() util.StringSet
	refill func() sets.String
	version uint64
}

// not thread-safe
func (c *stringsCache) Strings() (util.StringSet, uint64) {
func (c *stringsCache) Strings() (sets.String, uint64) {
	now := time.Now()
	if c.expiresAt.Before(now) {
		old := c.cached
@@ -549,8 +549,8 @@ func (self *slaveStorage) add(slaveId, offerId string) {
}

// delete the slave-offer mappings for slaveId, returns the IDs of the offers that were unmapped
func (self *slaveStorage) deleteSlave(slaveId string) util.StringSet {
	offerIds := util.NewStringSet()
func (self *slaveStorage) deleteSlave(slaveId string) sets.String {
	offerIds := sets.NewString()
	self.Lock()
	defer self.Unlock()
	for oid, sid := range self.index {

@@ -21,7 +21,7 @@ import (
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
)

type qitem struct {
@@ -277,13 +277,13 @@ func (f *DelayFIFO) List() []UniqueID {
	return list
}

// ContainedIDs returns a util.StringSet containing all IDs of the stored items.
// ContainedIDs returns a stringset.StringSet containing all IDs of the stored items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other go routines can add or remove items after you call this.
func (c *DelayFIFO) ContainedIDs() util.StringSet {
func (c *DelayFIFO) ContainedIDs() sets.String {
	c.rlock()
	defer c.runlock()
	set := util.StringSet{}
	set := sets.String{}
	for id := range c.items {
		set.Insert(id)
	}

@@ -22,7 +22,7 @@ import (
	"sync"
	"time"

	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
)

type entry struct {
@@ -177,13 +177,13 @@ func (f *HistoricalFIFO) ListKeys() []string {
	return list
}

// ContainedIDs returns a util.StringSet containing all IDs of the stored items.
// ContainedIDs returns a stringset.StringSet containing all IDs of the stored items.
// This is a snapshot of a moment in time, and one should keep in mind that
// other go routines can add or remove items after you call this.
func (c *HistoricalFIFO) ContainedIDs() util.StringSet {
func (c *HistoricalFIFO) ContainedIDs() sets.String {
	c.lock.RLock()
	defer c.lock.RUnlock()
	set := util.StringSet{}
	set := sets.String{}
	for id, entry := range c.items {
		if entry.Is(DELETE_EVENT | POP_EVENT) {
			continue

@@ -47,7 +47,7 @@ import (
	"k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/tools"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
)

type Slave struct {
@@ -711,7 +711,7 @@ func (k *KubernetesScheduler) explicitlyReconcileTasks(driver bindings.Scheduler

	// tell mesos to send us the latest status updates for all the non-terminal tasks that we know about
	statusList := []*mesos.TaskStatus{}
	remaining := util.KeySet(reflect.ValueOf(taskToSlave))
	remaining := sets.KeySet(reflect.ValueOf(taskToSlave))
	for taskId, slaveId := range taskToSlave {
		if slaveId == "" {
			delete(taskToSlave, taskId)

@@ -34,6 +34,7 @@ import (
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/util"
	"k8s.io/kubernetes/pkg/util/sets"
	"k8s.io/kubernetes/pkg/util/workqueue"
	"k8s.io/kubernetes/pkg/watch"

@@ -132,8 +133,8 @@ func (e *endpointController) Run(workers int, stopCh <-chan struct{}) {
	e.queue.ShutDown()
}

func (e *endpointController) getPodServiceMemberships(pod *api.Pod) (util.StringSet, error) {
	set := util.StringSet{}
func (e *endpointController) getPodServiceMemberships(pod *api.Pod) (sets.String, error) {
	set := sets.String{}
	services, err := e.serviceStore.GetPodServices(pod)
	if err != nil {
		// don't log this error because this function makes pointless

@@ -98,7 +98,7 @@ Use the file [`namespace-dev.json`](namespace-dev.json) which describes a develo
}
```

[Download example](namespace-dev.json)
[Download example](namespace-dev.json?raw=true)
<!-- END MUNGE: EXAMPLE namespace-dev.json -->

Create the development namespace using kubectl.

@@ -50,6 +50,7 @@ Code conventions
  - so pkg/controllers/autoscaler/foo.go should say `package autoscaler` not `package autoscalercontroller`.
  - Unless there's a good reason, the `package foo` line should match the name of the directory in which the .go file exists.
  - Importers can use a different name if they need to disambiguate.
  - Locks should be called `lock` and should never be embedded (always `lock sync.Mutex`). When multiple locks are present, give each lock a distinct name following Go conventions - `stateLock`, `mapLock` etc.
  - API conventions
    - [API changes](api_changes.md)
    - [API conventions](api-conventions.md)

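The lock-naming bullet added in the hunk above is easiest to see in code. A minimal, hypothetical illustration of the stated convention (not from the commit):

```go
package conventions

import "sync"

// podCache follows the convention: a single lock is simply named
// "lock", and it is a named field, never an embedded sync.Mutex.
type podCache struct {
	lock sync.Mutex
	pods map[string]string // guarded by lock
}

// scheduler holds multiple locks, so each gets a distinct,
// Go-style name describing what it guards.
type scheduler struct {
	stateLock sync.Mutex
	state     string // guarded by stateLock

	mapLock     sync.Mutex
	assignments map[string]string // guarded by mapLock
}
```
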
@@ -96,7 +96,7 @@ git push -f origin myfeature

### Creating a pull request

1. Visit http://github.com/$YOUR_GITHUB_USERNAME/kubernetes
1. Visit https://github.com/$YOUR_GITHUB_USERNAME/kubernetes
2. Click the "Compare and pull request" button next to your "myfeature" branch.
3. Check out the pull request [process](pull-requests.md) for more details


Binary file not shown. (Before size: 88 KiB | After size: 112 KiB)
@@ -76,7 +76,7 @@ using [cluster/aws/config-default.sh](http://releases.k8s.io/HEAD/cluster/aws/co

This process takes about 5 to 10 minutes. Once the cluster is up, the IP addresses of your master and node(s) will be printed,
as well as information about the default services running in the cluster (monitoring, logging, dns). User credentials and security
tokens are written in `~/.kube/kubeconfig`, they will be necessary to use the CLI or the HTTP Basic Auth.
tokens are written in `~/.kube/config`, they will be necessary to use the CLI or the HTTP Basic Auth.

By default, the script will provision a new VPC and a 4 node k8s cluster in us-west-2a (Oregon) with `t2.micro` instances running on Ubuntu.
You can override the variables defined in [config-default.sh](http://releases.k8s.io/HEAD/cluster/aws/config-default.sh) to change this behavior as follows:

@@ -43,7 +43,7 @@ Getting started on Microsoft Azure

## Prerequisites

** Azure Prerequisites**
**Azure Prerequisites**

1. You need an Azure account. Visit http://azure.microsoft.com/ to get started.
2. Install and configure the Azure cross-platform command-line interface. http://azure.microsoft.com/en-us/documentation/articles/xplat-cli/

@@ -62,7 +62,7 @@ fed-node = 192.168.121.65
**Prepare the hosts:**

* Install Kubernetes on all hosts - fed-{master,node}. This will also pull in docker. Also install etcd on fed-master. This guide has been tested with kubernetes-0.18 and beyond.
* The [--enablerepo=update-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive.
* The [--enablerepo=updates-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive.
* If you want the very latest Kubernetes release [you can download and yum install the RPM directly from Fedora Koji](http://koji.fedoraproject.org/koji/packageinfo?packageID=19202) instead of using the yum install command below.

```sh

@@ -73,7 +73,7 @@ spec:
    'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done']
```

[Download example](../../examples/blog-logging/counter-pod.yaml)
[Download example](../../examples/blog-logging/counter-pod.yaml?raw=true)
<!-- END MUNGE: EXAMPLE ../../examples/blog-logging/counter-pod.yaml -->

This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let’s create the pod in the default
@@ -182,6 +182,7 @@ spec:
      mountPath: /varlog
    - name: containers
      mountPath: /var/lib/docker/containers
      readOnly: true
  terminationGracePeriodSeconds: 30
  volumes:
  - name: varlog
@@ -192,7 +193,7 @@ spec:
      path: /var/lib/docker/containers
```

[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml)
[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml?raw=true)
<!-- END MUNGE: EXAMPLE ../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml -->

This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it.

@@ -30,7 +30,7 @@ exists, it will output details for every resource that has a name prefixed with
Possible resource types include (case insensitive): pods (po), services (svc),
replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits),
persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota),
namespaces (ns) or secrets.
namespaces (ns), serviceaccounts or secrets.


.SH OPTIONS

@@ -19,7 +19,7 @@ Display one or many resources.
Possible resource types include (case insensitive): pods (po), services (svc),
replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs),
limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc),
resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets.
resourcequotas (quota), namespaces (ns), endpoints (ep), serviceaccounts or secrets.

.PP
By specifying the output as 'template' and providing a Go template as the value

BIN
docs/proposals/Kubemark_architecture.png
Normal file
Binary file not shown. (After size: 30 KiB)
152
docs/proposals/api-group.md
Normal file
@@ -0,0 +1,152 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- BEGIN STRIP_FOR_RELEASE -->

<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">

<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>

If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.

<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/docs/proposals/api-group.md).

Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--

<!-- END STRIP_FOR_RELEASE -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

# Supporting multiple API groups

## Goal

1. Breaking the monolithic v1 API into modular groups and allowing groups to be enabled/disabled individually. This allows us to break the monolithic API server into smaller components in the future.

2. Supporting different versions in different groups. This allows different groups to evolve at different speeds.

3. Supporting identically named kinds to exist in different groups. This is useful when we experiment with new features of an API in the experimental group while supporting the stable API in the original group at the same time.

4. Exposing the API groups and versions supported by the server. This is required to develop a dynamic client.

5. Laying the basis for [API Plugin](../../docs/design/extending-api.md).

6. Keeping the user interaction easy. For example, we should allow users to omit the group name when using kubectl if there is no ambiguity.

## Bookkeeping for groups

1. No changes to TypeMeta:

   Currently many internal structures, such as RESTMapper and Scheme, are indexed and retrieved by APIVersion. For a fast implementation targeting the v1.1 deadline, we will concatenate group with version, in the form of "group/version", and use it where a version string is expected, so that much code can be reused. This implies we will not add a new field to TypeMeta; we will use TypeMeta.APIVersion to hold "group/version".

   For backward compatibility, v1 objects belong to the group with an empty name, so existing v1 config files will remain valid.

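A small sketch of the concatenation convention just described; the helper names are illustrative, not part of the proposal:

```go
package main

import (
	"fmt"
	"strings"
)

// joinGroupVersion builds the "group/version" string used wherever a
// plain version string was expected before. The legacy v1 API keeps
// an empty group name, so it stays plain "v1".
func joinGroupVersion(group, version string) string {
	if group == "" {
		return version
	}
	return group + "/" + version
}

// splitGroupVersion is the inverse; a string without "/" is treated
// as a legacy, group-less version for backward compatibility.
func splitGroupVersion(gv string) (group, version string) {
	if i := strings.Index(gv, "/"); i >= 0 {
		return gv[:i], gv[i+1:]
	}
	return "", gv
}

func main() {
	fmt.Println(joinGroupVersion("", "v1"))                   // "v1"
	fmt.Println(joinGroupVersion("experimental", "v1alpha1")) // "experimental/v1alpha1"
	fmt.Println(splitGroupVersion("experimental/v1alpha1"))   // "experimental v1alpha1"
}
```
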
2. /pkg/conversion#Scheme:

   The key of /pkg/conversion#Scheme.versionMap for versioned types will be "group/version". For now, the internal version types of all groups will be registered to versionMap[""], as we don't have any identically named kinds in different groups yet. In the near future, internal version types will be registered to versionMap["group/"], and pkg/conversion#Scheme.InternalVersion will have type []string.

   We will need a mechanism to express if two kinds in different groups (e.g., compute/pods and experimental/pods) are convertible, and auto-generate the conversions if they are.

3. meta.RESTMapper:

   Each group will have its own RESTMapper (of type DefaultRESTMapper), and these mappers will be registered to pkg/api#RESTMapper (of type MultiRESTMapper).

   To support identically named kinds in different groups, we need to expand the input of RESTMapper.VersionAndKindForResource from (resource string) to (group, resource string). If group is not specified and there is ambiguity (i.e., the resource exists in multiple groups), an error should be returned to force the user to specify the group.

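A toy sketch of the widened lookup the paragraph above calls for; the registration table and helper name are illustrative only:

```go
package main

import "fmt"

// entry is a toy registration record: (group, version, kind, resource).
type entry struct{ group, version, kind, resource string }

var registered = []entry{
	{"", "v1", "Pod", "pods"},
	{"experimental", "v1alpha1", "Pod", "pods"},
}

// versionAndKindForResource mirrors the expanded input described above:
// (group, resource) instead of just (resource). Leaving the group empty
// only works while the resource name is unambiguous across groups; this
// sketch glosses over addressing the legacy "" group explicitly.
func versionAndKindForResource(group, resource string) (version, kind string, err error) {
	var hits []entry
	for _, e := range registered {
		if e.resource == resource && (group == "" || e.group == group) {
			hits = append(hits, e)
		}
	}
	switch len(hits) {
	case 1:
		return hits[0].version, hits[0].kind, nil
	case 0:
		return "", "", fmt.Errorf("no match for resource %q in group %q", resource, group)
	default:
		return "", "", fmt.Errorf("resource %q exists in multiple groups; specify one", resource)
	}
}

func main() {
	if _, _, err := versionAndKindForResource("", "pods"); err != nil {
		fmt.Println(err) // ambiguous without a group
	}
	v, k, _ := versionAndKindForResource("experimental", "pods")
	fmt.Println(v, k) // v1alpha1 Pod
}
```
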
## Server-side implementation

1. resource handlers' URL:

   We will force the URL to be in the form of prefix/group/version/...

   Prefix is used to differentiate API paths from other paths like /healthz. All groups will use the same prefix="apis", except when backward compatibility requires otherwise. No "/" is allowed in prefix, group, or version. Specifically,

   * for /api/v1, we set the prefix="api" (which is populated from cmd/kube-apiserver/app#APIServer.APIPrefix), group="", version="v1", so the URL remains /api/v1.

   * for new kube API groups, we will set the prefix="apis" (we will add a field in type APIServer to hold this prefix), group=GROUP_NAME, version=VERSION. For example, the URL of the experimental resources will be /apis/experimental/v1alpha1.

   * for OpenShift v1 API, because it's currently registered at /oapi/v1, to be backward compatible, OpenShift may set prefix="oapi", group="".

   * for other new third-party APIs, they should also use the prefix="apis" and choose the group and version. This can be done through the thirdparty API plugin mechanism in [13000](http://pr.k8s.io/13000).

2. supporting API discovery:
|
||||
|
||||
* At /prefix (e.g., /apis), API server will return the supported groups and their versions using pkg/api/unversioned#APIVersions type, setting the Versions field to "group/version". This is backward compatible, because currently API server does return "v1" encoded in pkg/api/unversioned#APIVersions at /api. (We will also rename the JSON field name from `versions` to `apiVersions`, to be consistent with pkg/api#TypeMeta.APIVersion field)
|
||||
|
||||
* At /prefix/group, API server will return all supported versions of the group. We will create a new type VersionList (name is open to discussion) in pkg/api/unversioned as the API.
|
||||
|
||||
* At /prefix/group/version, API server will return all supported resources in this group, and whether each resource is namespaced. We will create a new type APIResourceList (name is open to discussion) in pkg/api/unversioned as the API.
|
||||
|
||||
We will design how to handle deeper path in other proposals.
|
||||
|
||||
* At /swaggerapi/swagger-version/prefix/group/version, API server will return the Swagger spec of that group/version in `swagger-version` (e.g. we may support both Swagger v1.2 and v2.0).
|
||||
|
||||
3. handling common API objects:
|
||||
|
||||
* top-level common API objects:
|
||||
|
||||
To handle the top-level API objects that are used by all groups, we either have to register them to all schemes, or we can choose not to encode them to a version. We plan to take the latter approach and place such types in a new package called `unversioned`, because many of the common top-level objects, such as APIVersions, VersionList, and APIResourceList, which are used in the API discovery, and pkg/api#Status, are part of the protocol between client and server, and do not belong to the domain-specific parts of the API, which will evolve independently over time.
|
||||
|
||||
Types in the unversioned package will not have the APIVersion field, but may retain the Kind field.
|
||||
|
||||
For backward compatibility, when hanlding the Status, the server will encode it to v1 if the client expects the Status to be encoded in v1, otherwise the server will send the unversioned#Status. If an error occurs before the version can be determined, the server will send the unversioned#Status.
|
||||
|
||||
* non-top-level common API objects:
|
||||
|
||||
Assuming object o belonging to group X is used as a field in an object belonging to group Y, currently genconversion will generate the conversion functions for o in package Y. Hence, we don't need any special treatment for non-top-level common API objects.
|
||||
|
||||
TypeMeta is an exception, because it is a common object that is used by objects in all groups but does not logically belong to any group. We plan to move it to the package `unversioned`.
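
To illustrate the discovery flow from point 2 above, here is a hedged sketch of what the discovery types might look like; the field names, JSON layout, and the example resource name are illustrative assumptions, not the final pkg/api/unversioned definitions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// APIVersions is a stand-in for the type served at /apis; note the renamed
// apiVersions JSON field holding "group/version" strings.
type APIVersions struct {
	Kind        string   `json:"kind"`
	APIVersions []string `json:"apiVersions"`
}

// APIResource describes one resource and whether it is namespaced.
type APIResource struct {
	Name       string `json:"name"`
	Namespaced bool   `json:"namespaced"`
}

// APIResourceList is a stand-in for the type served at /prefix/group/version.
type APIResourceList struct {
	Kind      string        `json:"kind"`
	Resources []APIResource `json:"resources"`
}

func main() {
	// GET /apis -> every group/version the server supports.
	root := APIVersions{Kind: "APIVersions", APIVersions: []string{"experimental/v1alpha1"}}
	// GET /apis/experimental/v1alpha1 -> the resources served there
	// (the resource name here is only an example).
	list := APIResourceList{
		Kind:      "APIResourceList",
		Resources: []APIResource{{Name: "horizontalpodautoscalers", Namespaced: true}},
	}
	for _, obj := range []interface{}{root, list} {
		b, _ := json.MarshalIndent(obj, "", "  ")
		fmt.Println(string(b))
	}
}
```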

## Client-side implementation

1. clients:

Currently we have structured (pkg/client/unversioned#ExperimentalClient, pkg/client/unversioned#Client) and unstructured (pkg/kubectl/resource#Helper) clients. The structured clients are not scalable because each of them implements a specific interface, e.g., [here](../../pkg/client/unversioned/client.go#L32). Only the unstructured clients are scalable. We should either auto-generate the code for structured clients or migrate to using the unstructured clients as much as possible.

We should also move the unstructured client to pkg/client/.

2. Spelling the URL:

The URL is in the form of prefix/group/version/. The prefix is hard-coded in client/unversioned.Config (see [here](../../pkg/client/unversioned/experimental.go#L101)). The client should be able to figure out `group` and `version` using the RESTMapper. A third-party client which does not have access to the RESTMapper should discover the mapping of `group`, `version` and `kind` by querying the server as described in point 2 of #server-side-implementation.

3. kubectl:

kubectl should accept arguments like `group/resource` and `group/resource/name` (a small parsing sketch follows this section). Nevertheless, the user can omit the `group`; kubectl shall then rely on RESTMapper.VersionAndKindForResource() to figure out the default group/version of the resource. For example, for resources (like `node`) that exist in both the k8s v1 API and a k8s modularized API (like `infra/v2`), we should set kubectl to default to one of them. If there is no default group, kubectl should return an error for the ambiguity.

When kubectl is used with a single resource type, the --api-version and --output-version flags of kubectl should accept values in the form of `group/version`, and they should work as they do today. For multi-resource operations, we will disable these two flags initially.

Currently, by setting pkg/client/unversioned/clientcmd/api/v1#Config.NamedCluster[x].Cluster.APIVersion ([here](../../pkg/client/unversioned/clientcmd/api/v1/types.go#L58)), the user can configure the default apiVersion used by kubectl to talk to the server. It does not make sense to set a global version used by kubectl when there are multiple groups, so we plan to deprecate this field. We may extend the version negotiation function to negotiate the preferred version of each group. Details will be in another proposal.
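
As a small illustration of the argument forms kubectl would accept, here is a hypothetical parsing sketch (not the actual kubectl code); per this proposal, a two-part argument is read as group/resource:

```go
package main

import (
	"fmt"
	"strings"
)

// parseResourceArg splits kubectl arguments of the form "resource",
// "group/resource", or "group/resource/name". An empty group means kubectl
// must fall back to RESTMapper.VersionAndKindForResource to pick the default
// group, erroring out if the resource exists in more than one group.
func parseResourceArg(arg string) (group, resource, name string) {
	parts := strings.Split(arg, "/")
	switch len(parts) {
	case 1:
		return "", parts[0], ""
	case 2:
		return parts[0], parts[1], ""
	default:
		return parts[0], parts[1], parts[2]
	}
}

func main() {
	for _, arg := range []string{"pods", "experimental/pods", "experimental/pods/mypod"} {
		g, r, n := parseResourceArg(arg)
		fmt.Printf("group=%q resource=%q name=%q\n", g, r, n)
	}
}
```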

## OpenShift integration

OpenShift can take a similar approach to break its monolithic v1 API: keep the v1 objects where they are, and gradually add groups.

The v1 objects in OpenShift should keep doing what they do now: they should remain registered to the Scheme.versionMap["v1"] scheme, and they should keep being added to originMapper.

New OpenShift groups should do the same as native Kubernetes groups would do: each group should register to Scheme.versionMap["group/version"], and each should have a separate RESTMapper registered to the MultiRESTMapper.

To expose a list of the supported OpenShift groups to clients, OpenShift just has to call pkg/cmd/server/origin#initAPIVersionRoute() as it does now, passing in the supported "group/versions" instead of "versions".

## Future work

1. Dependencies between groups: we need an interface to register the dependencies between groups. It is not our priority now as the use cases are not clear yet.

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
190
docs/proposals/kubemark.md
Normal file
@@ -0,0 +1,190 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- BEGIN STRIP_FOR_RELEASE -->

<img src="http://kubernetes.io/img/warning.png" alt="WARNING" width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING" width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING" width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING" width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING" width="25" height="25">

<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>

If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.

<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/docs/proposals/kubemark.md).

Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--

<!-- END STRIP_FOR_RELEASE -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

# Kubemark proposal

## Goal of this document

This document describes a design of Kubemark - a system that allows performance testing of a Kubernetes cluster. It describes the assumptions and high-level design, and discusses possible solutions for lower-level problems. It is supposed to be a starting point for a more detailed discussion.

## Current state and objective

Currently performance testing happens on ‘live’ clusters of up to 100 Nodes. It takes quite a while to start such a cluster or to push updates to all Nodes, and it uses quite a lot of resources. At this scale the amount of wasted time and used resources is still acceptable. In the next quarter or two we’re targeting a 1000 Node cluster, which will push it way beyond the ‘acceptable’ level. Additionally we want to enable people without many resources to run scalability tests on bigger clusters than they can afford at a given time. Having the ability to cheaply run scalability tests will enable us to run some set of them on "normal" test clusters, which in turn would mean the ability to run them on every PR.

This means that we need a system that will allow for realistic performance testing on a (much) smaller number of “real” machines. The first assumption we make is that Nodes are independent, i.e., the number of existing Nodes does not impact the performance of a single Node. This is not entirely true, as the number of Nodes can increase the latency of various components on the Master machine, which in turn may increase the latency of Node operations, but we’re not interested in measuring this effect here. Instead we want to measure how the number of Nodes and the load imposed by Node daemons affect the performance of Master components.

## Kubemark architecture overview

The high-level idea behind Kubemark is to write a library that allows running artificial "Hollow" Nodes able to simulate the behavior of a real Kubelet and KubeProxy in a single, lightweight binary. Hollow components will need to correctly respond to Controllers (via the API server), and preferably, in the fullness of time, be able to ‘replay’ previously recorded real traffic (this is out of scope for the initial version). To teach Hollow components to replay recorded traffic they will need to store data specifying when a given Pod/Container should die (e.g., observed lifetime). Such data can be extracted e.g. from etcd Raft logs, or it can be reconstructed from Events. In the initial version we only want them to be able to fool Master components and put some configurable (in what way TBD) load on them.

When we have the Hollow Node ready, we’ll be able to test the performance of Master components by creating a real Master Node, with API server, Controllers, etcd and whatnot, and creating a number of Hollow Nodes that will register with the running Master.

To make Kubemark easier to maintain as the system evolves, Hollow components will reuse real "production" code for Kubelet and KubeProxy, but will mock all the backends with no-op or very simple mocks. We believe that this approach is better in the long run than writing a separate, special "performance-test-aimed" version of them. This may take more time to create an initial version, but we think the maintenance cost will be noticeably smaller.
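
A minimal sketch of this mock-injection idea follows; the `ContainerRuntime` interface and all type names here are hypothetical simplifications of the real Kubelet wiring, shown only to illustrate swapping the backend at construction time:

```go
package main

import "fmt"

// ContainerRuntime stands in for the backend interfaces the real Kubelet
// talks to; all names in this sketch are hypothetical simplifications.
type ContainerRuntime interface {
	RunPod(name string) error
}

// dockerRuntime represents the production backend.
type dockerRuntime struct{}

func (dockerRuntime) RunPod(name string) error {
	// ... would start real containers via Docker ...
	return nil
}

// noopRuntime is the "hollow" mock: it accepts every request and does
// nothing, so the Master sees a perfectly obedient Node at minimal cost.
type noopRuntime struct{}

func (noopRuntime) RunPod(name string) error {
	fmt.Printf("hollow: pretending to run pod %s\n", name)
	return nil
}

// kubelet stands for the reused production Kubelet code; only its backend
// is swapped at construction time.
type kubelet struct {
	runtime ContainerRuntime
}

func (k *kubelet) handleBinding(pod string) error { return k.runtime.RunPod(pod) }

func main() {
	prod := &kubelet{runtime: dockerRuntime{}}  // production wiring
	hollow := &kubelet{runtime: noopRuntime{}}  // Kubemark wiring
	_ = prod
	_ = hollow.handleBinding("nginx-1")
}
```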

### Option 1

For the initial version we will teach Master components to use the port number to identify the Kubelet/KubeProxy. This will allow running those components on non-default ports, and at the same time will allow running multiple Hollow Nodes on a single machine. During setup we will generate credentials for cluster communication and pass them to HollowKubelet/HollowProxy to use. The Master will treat all HollowNodes as normal ones.

![Kubemark architecture diagram for option 1](Kubemark_architecture.png?raw=true "Kubemark architecture overview")

*Kubemark architecture diagram for option 1*
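
As a sketch of option 1, assuming hypothetical helper names and an illustrative handler set, multiple Hollow Nodes could each serve their Kubelet-style endpoints on a distinct port of the same machine, so the Master can address each one as host:port:

```go
package main

import (
	"fmt"
	"net/http"
)

// startHollowNode serves Kubelet-style HTTP endpoints on a dedicated port so
// the Master can address every simulated Node on one machine as host:port.
// The handler set and port range are illustrative assumptions.
func startHollowNode(nodeName string, port int) {
	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "ok from %s", nodeName)
	})
	go http.ListenAndServe(fmt.Sprintf(":%d", port), mux)
}

func main() {
	// Ten Hollow Nodes on a single machine, starting at the default
	// Kubelet port and counting up.
	for i := 0; i < 10; i++ {
		startHollowNode(fmt.Sprintf("hollow-node-%d", i), 10250+i)
	}
	select {} // keep the simulated Nodes serving
}
```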

### Option 2

As a second (equivalent) option we will run Kubemark on top of a 'real' Kubernetes cluster, where both the Master and the Hollow Nodes will be Pods. In this option we'll be able to use Kubernetes mechanisms to streamline setup, e.g., by using Kubernetes networking to ensure unique IPs for Hollow Nodes, or using Secrets to distribute Kubelet credentials. The downside of this configuration is that some noise is likely to appear in Kubemark results, from either CPU/Memory pressure from other things running on the Nodes (e.g., FluentD, or Kubelet) or from running the cluster over an overlay network. We believe that it'll be possible to turn off cluster monitoring for Kubemark runs, so that the impact of real Node daemons will be minimized, but we don't know what the impact of using a higher-level networking stack will be. Running a comparison will be an interesting test in itself.

### Discussion

Before taking a closer look at the steps necessary to set up a minimal Hollow cluster it's hard to tell which approach will be simpler. It's quite possible that the initial version will end up as a hybrid between running the Hollow cluster directly on top of VMs and running the Hollow cluster on top of a Kubernetes cluster that is running on top of VMs, e.g., running Nodes as Pods in a Kubernetes cluster and the Master directly on top of a VM.

## Things to simulate

In real Kubernetes on a single Node we run two daemons that communicate with the Master in some way: Kubelet and KubeProxy.

### KubeProxy

As a replacement for KubeProxy we'll use HollowProxy, which will be a real KubeProxy with injected no-op mocks everywhere it makes sense.

### Kubelet

As a replacement for Kubelet we'll use HollowKubelet, which will be a real Kubelet with injected no-op or simple mocks everywhere it makes sense.

Kubelet also exposes the cAdvisor endpoint, which is scraped by Heapster, and healthz, which is read by supervisord, and we have FluentD running as a Pod on each Node that exports logs to Elasticsearch (or Google Cloud Logging). Both Heapster and Elasticsearch run in Pods in the cluster, so they do not add any load on the Master components by themselves. There can be other systems that scrape Heapster through the proxy running on the Master, which adds additional load, but they're not part of the default setup, so in the first version we won't simulate this behavior.

In the first version we’ll assume that all started Pods will run indefinitely if not explicitly deleted. In the future we can add a model of short-running batch jobs, but in the initial version we’ll assume only serving-like Pods.

### Heapster

In addition to system components we run Heapster as a part of the cluster monitoring setup. Heapster currently watches Events, Pods and Nodes through the API server. In the test setup we can use the real Heapster to watch the API server, with the piece that scrapes cAdvisor data from Kubelets mocked out.

### Elasticsearch and Fluentd

Similarly to Heapster, Elasticsearch runs outside the Master machine but generates some traffic on it. The Fluentd “daemon” running on the Master periodically sends the Docker logs it has gathered to the Elasticsearch instance running on one of the Nodes. In the initial version we omit Elasticsearch, as it produces only a constant small load on the Master Node that does not change with the size of the cluster.

## Necessary work

There are three more or less independent things that need to be worked on:
- HollowNode implementation: creating a library/binary that will be able to listen to Watches and respond in a correct fashion with Status updates. This also involves creation of a CloudProvider that can produce such Hollow Nodes, or making sure that HollowNodes can correctly self-register in a no-provider Master.
- Kubemark setup: figuring out the networking model and the number of Hollow Nodes that will be allowed to run on a single “machine”, and writing setup/run/teardown scripts (in [option 1](#option-1)), or figuring out how to run the Master and Hollow Nodes on top of Kubernetes (in [option 2](#option-2)).
- Creating a Player component that will send requests to the API server, putting load on the cluster (a minimal sketch follows this list). This involves creating a way to specify the desired workload. This task is very well isolated from the rest, as it is about sending requests to the real API server, so we can discuss its requirements separately.
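
As referenced in the list above, here is a minimal sketch of the Player idea under stated assumptions: `createPod` is a placeholder standing in for a real API server call, and the constant-rate loop stands in for a proper workload specification:

```go
package main

import (
	"fmt"
	"time"
)

// createPod is a placeholder for a real API server call (e.g., a POST to the
// pods endpoint); the Player only needs a normal client, since it talks to
// the real Master.
func createPod(name string) error {
	fmt.Println("creating pod", name)
	return nil
}

// play issues requests at a fixed rate; a real workload specification would
// replace this constant-rate loop.
func play(qps float64, total int) {
	ticker := time.NewTicker(time.Duration(float64(time.Second) / qps))
	defer ticker.Stop()
	for i := 0; i < total; i++ {
		<-ticker.C
		_ = createPod(fmt.Sprintf("load-pod-%d", i))
	}
}

func main() {
	play(5, 20) // 5 pod creations per second, 20 pods in total
}
```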

## Concerns

Network performance most likely won't be a problem for the initial version if running directly on VMs rather than on top of a Kubernetes cluster, as Kubemark will be running on a standard networking stack (no cloud-provider software routes or overlay network are needed, as we don't need custom routing between Pods). Similarly we don't think that running Kubemark on virtualized Kubernetes cluster networking will cause a noticeable performance impact, but it requires testing.

On the other hand, when adding additional features it may turn out that we need to simulate the Kubernetes Pod network. In such a case, when running 'pure' Kubemark we may try one of the following:
- running an overlay network like Flannel or OVS instead of using cloud provider routes,
- writing a simple network multiplexer to multiplex communications from the Hollow Kubelets/KubeProxies on the machine.

In the case of Kubemark on Kubernetes it may turn out that we run into a problem with adding yet another layer of network virtualization, but we don't need to solve this problem now.

## Work plan

- Teach/make sure that the Master can talk to multiple Kubelets on the same machine ([option 1](#option-1)):
  - make sure that the Master can talk to a Kubelet on a non-default port,
  - make sure that the Master can talk to all Kubelets on different ports,
- Write the HollowNode library:
  - new HollowProxy,
  - new HollowKubelet,
  - new HollowNode combining the two,
  - make sure that the Master can talk to two HollowKubelets running on the same machine,
- Make sure that we can run a Hollow cluster on top of Kubernetes ([option 2](#option-2)),
- Write a Player that will automatically put some predefined load on the Master <- this is the moment when it’s possible to play with it, and it is useful by itself for scalability tests. Alternatively we can just use the current density/load tests,
- Benchmark our machines - see how many Watch clients we can have before everything explodes,
- See how many HollowNodes we can run on a single machine by attaching them to the real Master <- this is the moment it starts to be useful,
- Update kube-up/kube-down scripts to enable creating “HollowClusters”/write new scripts/something, and integrate the HollowCluster with Elasticsearch/Heapster equivalents,
- Allow passing custom configuration to the Player.

## Future work

In the future we want to add the following capabilities to the Kubemark system:
- replaying real traffic reconstructed from the recorded Events stream,
- simulating scraping things running on Nodes through the Master proxy.

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -108,7 +108,7 @@ spec:
restartPolicy: Never
```

[Download example](downward-api/dapi-pod.yaml)
[Download example](downward-api/dapi-pod.yaml?raw=true)
<!-- END MUNGE: EXAMPLE downward-api/dapi-pod.yaml -->

@@ -178,7 +178,7 @@ spec:
fieldPath: metadata.annotations
```

[Download example](downward-api/volume/dapi-volume.yaml)
[Download example](downward-api/volume/dapi-volume.yaml?raw=true)
<!-- END MUNGE: EXAMPLE downward-api/volume/dapi-volume.yaml -->

Some more thorough examples:

@@ -100,7 +100,7 @@ kubectl
* [kubectl stop](kubectl_stop.md) - Deprecated: Gracefully shut down a resource by name or filename.
* [kubectl version](kubectl_version.md) - Print the client and server version information.

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476725335 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.165115265 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -119,7 +119,7 @@ $ kubectl annotate pods foo description-

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-02 06:24:17.720533039 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.16095949 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -76,7 +76,7 @@ kubectl api-versions

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476265479 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.164255617 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -98,7 +98,7 @@ $ kubectl attach 123456-7890 -c ruby-container -i -t

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471309711 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.155651469 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -76,7 +76,7 @@ kubectl cluster-info

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476078738 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163962347 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -94,7 +94,7 @@ kubectl config SUBCOMMAND
* [kubectl config use-context](kubectl_config_use-context.md) - Sets the current-context in a kubeconfig file
* [kubectl config view](kubectl_config_view.md) - displays Merged kubeconfig settings or a specified kubeconfig file.

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475888484 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163685546 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -96,7 +96,7 @@ $ kubectl config set-cluster e2e --insecure-skip-tls-verify=true

* [kubectl config](kubectl_config.md) - config modifies kubeconfig files

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474677631 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.161700827 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -89,7 +89,7 @@ $ kubectl config set-context gce --user=cluster-admin

* [kubectl config](kubectl_config.md) - config modifies kubeconfig files

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475093212 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162402642 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -109,7 +109,7 @@ $ kubectl config set-credentials cluster-admin --client-certificate=~/.kube/admi

* [kubectl config](kubectl_config.md) - config modifies kubeconfig files

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474882527 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162045132 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -78,7 +78,7 @@ kubectl config set PROPERTY_NAME PROPERTY_VALUE

* [kubectl config](kubectl_config.md) - config modifies kubeconfig files

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475281504 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162716308 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -77,7 +77,7 @@ kubectl config unset PROPERTY_NAME

* [kubectl config](kubectl_config.md) - config modifies kubeconfig files

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475473658 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163015642 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -76,7 +76,7 @@ kubectl config use-context CONTEXT_NAME

* [kubectl config](kubectl_config.md) - config modifies kubeconfig files

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475674294 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163336177 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -103,7 +103,7 @@ $ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2

* [kubectl config](kubectl_config.md) - config modifies kubeconfig files

###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.775349034 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.161359997 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -96,7 +96,7 @@ $ cat pod.json | kubectl create -f -

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469492371 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.152429973 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -119,7 +119,7 @@ $ kubectl delete pods --all

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470182255 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153952299 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -51,7 +51,7 @@ exists, it will output details for every resource that has a name prefixed with
Possible resource types include (case insensitive): pods (po), services (svc),
replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits),
persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota),
namespaces (ns) or secrets.
namespaces (ns), serviceaccounts or secrets.

```
kubectl describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME)
@@ -119,7 +119,7 @@ $ kubectl describe pods frontend

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469291072 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.152057668 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -99,7 +99,7 @@ $ kubectl exec 123456-7890 -c ruby-container -i -t -- bash -il

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471517301 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156052759 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -121,7 +121,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 09:05:42.928698484 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.159044239 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -43,7 +43,7 @@ Display one or many resources.
Possible resource types include (case insensitive): pods (po), services (svc),
replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs),
limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc),
resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets.
resourcequotas (quota), namespaces (ns), endpoints (ep), serviceaccounts or secrets.

By specifying the output as 'template' and providing a Go template as the value
of the --template flag, you can filter the attributes of the fetched resource(s).
@@ -132,7 +132,7 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.761418557 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.151532564 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -120,7 +120,7 @@ $ kubectl label pods foo bar-

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.773776248 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.160594172 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -98,7 +98,7 @@ $ kubectl logs -f 123456-7890 ruby-container

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470591683 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154570214 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -79,7 +79,7 @@ kubectl namespace [namespace]

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470380367 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154262869 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -102,7 +102,7 @@ kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469927571 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153568922 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -99,7 +99,7 @@ $ kubectl port-forward mypod 0:5000

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471732563 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156433376 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -121,7 +121,7 @@ $ kubectl proxy --api-prefix=/k8s-api

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.472010935 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156927042 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -110,7 +110,7 @@ kubectl replace --force -f ./pod.json

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469727962 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153166598 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -118,7 +118,7 @@ $ kubectl rolling-update frontend --image=image:v2

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.768458355 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154895732 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -133,7 +133,7 @@ $ kubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-07 06:40:12.142439604 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.15783835 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -108,7 +108,7 @@ $ kubectl scale --replicas=5 rc/foo rc/bar

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471116954 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.155304524 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -110,7 +110,7 @@ $ kubectl stop -f path/to/resources

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.47250815 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.158360787 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()

@@ -82,7 +82,7 @@ kubectl version

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476464324 +0000 UTC
###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.164581808 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()