	Merge pull request #91980 from rosti/kubeadm-cc-manual-upgrade
kubeadm upgrade: Allow supplying hand migrated component configs
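This change teaches kubeadm upgrade apply and kubeadm upgrade plan to accept a --config file that contains only hand-migrated component configs (for example a KubeletConfiguration or KubeProxyConfiguration document) and no kubeadm types at all. The deciding rule, introduced in the diff below, is whether any document in the file belongs to the kubeadm API group (kubeadm.k8s.io): if none does, the kubeadm configuration is still fetched from the cluster and only the component configs are taken from the file. The snippet below is a rough, self-contained illustration of that rule; it only scans apiVersion fields with the standard library, whereas kubeadm itself splits the file into a DocumentMap and compares GroupVersionKinds.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// hasKubeadmGroup reports whether any document in a multi-document YAML file
// declares an apiVersion in the kubeadm API group ("kubeadm.k8s.io/...").
// Crude line scan for illustration only; not kubeadm's parser.
func hasKubeadmGroup(file string) bool {
	scanner := bufio.NewScanner(strings.NewReader(file))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(line, "apiVersion:") {
			group := strings.TrimSpace(strings.TrimPrefix(line, "apiVersion:"))
			if strings.HasPrefix(group, "kubeadm.k8s.io/") {
				return true
			}
		}
	}
	return false
}

func main() {
	// A hand-migrated, component-configs-only file: no kubeadm types inside.
	componentOnly := `
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
`
	// A legacy reconfiguration file: contains a kubeadm ClusterConfiguration.
	legacy := `
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.0
`
	fmt.Println(hasKubeadmGroup(componentOnly)) // false: only component configs, kubeadm types come from the cluster
	fmt.Println(hasKubeadmGroup(legacy))        // true: legacy --config reconfiguration path
}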
@@ -254,7 +254,7 @@ func getInternalCfg(cfgPath string, kubeconfigPath string, cfg kubeadmapiv1beta2
	if cfgPath == "" {
		client, err := kubeconfigutil.ClientSetFromFile(kubeconfigPath)
		if err == nil {
-			internalcfg, err := configutil.FetchInitConfigurationFromCluster(client, out, logPrefix, false)
+			internalcfg, err := configutil.FetchInitConfigurationFromCluster(client, out, logPrefix, false, false)
			if err == nil {
				fmt.Println() // add empty line to separate the FetchInitConfigurationFromCluster output from the command output
				return internalcfg, nil

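Most of the mechanical churn in this diff — here and in the join, reset, diff, node and config code paths further down — comes from a single signature change: FetchInitConfigurationFromCluster gains a trailing skipComponentConfigs bool, and every pre-existing caller passes false to keep its behaviour. The flag lets the new upgrade path read only the kubeadm types, so that component configs stored in the cluster in an old, no-longer-decodable version do not fail the fetch before the user's hand-migrated replacements are applied. A toy sketch of that idea; all names and the "cluster store" below are invented for illustration:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// A toy cluster store: the kube-proxy config saved in the cluster is too old
// for the new kubeadm to decode (the situation this PR targets).
var clusterStore = map[string]string{
	"kubeadm.k8s.io":          "ClusterConfiguration v1beta2",
	"kubeproxy.config.k8s.io": "KubeProxyConfiguration v1alpha0 (unsupported)",
}

// fetchInitConfiguration imitates the new skipComponentConfigs switch: with
// the flag set it only reads the kubeadm types and never touches the
// (possibly unreadable) component configs.
func fetchInitConfiguration(skipComponentConfigs bool) (string, error) {
	cfg := clusterStore["kubeadm.k8s.io"]
	if skipComponentConfigs {
		return cfg, nil
	}
	if proxy := clusterStore["kubeproxy.config.k8s.io"]; strings.Contains(proxy, "unsupported") {
		return "", errors.New("unsupported apiVersion kubeproxy.config.k8s.io/v1alpha0")
	}
	return cfg + " + component configs", nil
}

func main() {
	fmt.Println(fetchInitConfiguration(false)) // existing callers: fails while old configs sit in the cluster
	fmt.Println(fetchInitConfiguration(true))  // upgrade path: kubeadm types only, component configs come from the user's file
}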
@@ -545,7 +545,7 @@ func fetchInitConfiguration(tlsBootstrapCfg *clientcmdapi.Config) (*kubeadmapi.I
	}

	// Fetches the init configuration
-	initConfiguration, err := configutil.FetchInitConfigurationFromCluster(tlsClient, os.Stdout, "preflight", true)
+	initConfiguration, err := configutil.FetchInitConfigurationFromCluster(tlsClient, os.Stdout, "preflight", true, false)
	if err != nil {
		return nil, errors.Wrap(err, "unable to fetch the kubeadm-config ConfigMap")
	}

@@ -94,7 +94,7 @@ func newResetData(cmd *cobra.Command, options *resetOptions, in io.Reader, out i
	client, err := getClientset(options.kubeconfigPath, false)
	if err == nil {
		klog.V(1).Infof("[reset] Loaded client set from kubeconfig file: %s", options.kubeconfigPath)
-		cfg, err = configutil.FetchInitConfigurationFromCluster(client, out, "reset", false)
+		cfg, err = configutil.FetchInitConfigurationFromCluster(client, out, "reset", false, false)
		if err != nil {
			klog.Warningf("[reset] Unable to fetch the kubeadm-config ConfigMap from cluster: %v", err)
		}

@@ -20,6 +20,7 @@ go_library(
        "//cmd/kubeadm/app/cmd/phases/upgrade/node:go_default_library",
        "//cmd/kubeadm/app/cmd/phases/workflow:go_default_library",
        "//cmd/kubeadm/app/cmd/util:go_default_library",
+        "//cmd/kubeadm/app/componentconfigs:go_default_library",
        "//cmd/kubeadm/app/constants:go_default_library",
        "//cmd/kubeadm/app/features:go_default_library",
        "//cmd/kubeadm/app/phases/controlplane:go_default_library",

@@ -73,12 +73,7 @@ func NewCmdApply(apf *applyPlanFlags) *cobra.Command {
		DisableFlagsInUseLine: true,
		Short:                 "Upgrade your Kubernetes cluster to the specified version",
		RunE: func(cmd *cobra.Command, args []string) error {
-			userVersion, err := getK8sVersionFromUserInput(flags.applyPlanFlags, args, true)
-			if err != nil {
-				return err
-			}
-
-			return runApply(flags, userVersion)
+			return runApply(flags, args)
		},
	}

@@ -110,12 +105,12 @@ func NewCmdApply(apf *applyPlanFlags) *cobra.Command {
//   - Creating the RBAC rules for the bootstrap tokens and the cluster-info ConfigMap
//   - Applying new kube-dns and kube-proxy manifests
//   - Uploads the newly used configuration to the cluster ConfigMap
-func runApply(flags *applyFlags, userVersion string) error {
+func runApply(flags *applyFlags, args []string) error {

	// Start with the basics, verify that the cluster is healthy and get the configuration from the cluster (using the ConfigMap)
	klog.V(1).Infoln("[upgrade/apply] verifying health of cluster")
	klog.V(1).Infoln("[upgrade/apply] retrieving configuration from cluster")
-	client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, flags.dryRun, userVersion)
+	client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, args, flags.dryRun, true)
	if err != nil {
		return err
	}

@@ -21,6 +21,7 @@ import (
	"bytes"
	"fmt"
	"io"
+	"io/ioutil"
	"os"
	"strings"
	"time"

@@ -36,47 +37,91 @@ import (
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
	cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
+	"k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs"
	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
	"k8s.io/kubernetes/cmd/kubeadm/app/features"
	"k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade"
	"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
+	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
	configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
	dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
	kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
)

-func getK8sVersionFromUserInput(flags *applyPlanFlags, args []string, versionIsMandatory bool) (string, error) {
-	var userVersion string
+// isKubeadmConfigPresent checks if a kubeadm config type is found in the provided document map
+func isKubeadmConfigPresent(docmap kubeadmapi.DocumentMap) bool {
+	for gvk := range docmap {
+		if gvk.Group == kubeadmapi.GroupName {
+			return true
+		}
+	}
+	return false
+}

-	// If the version is specified in config file, pick up that value.
-	if flags.cfgPath != "" {
-		// Note that cfg isn't preserved here, it's just an one-off to populate userVersion based on --config
-		cfg, err := configutil.LoadInitConfigurationFromFile(flags.cfgPath)
+// loadConfig loads configuration from a file and/or the cluster. InitConfiguration, ClusterConfiguration and (optionally) component configs
+// are loaded. This function allows the component configs to be loaded from a file that contains only them. If the file contains any kubeadm types
+// in it (API group "kubeadm.k8s.io" present), then the supplied file is treated as a legacy reconfiguration style "--config" use and the
+// returned bool value is set to true (the only case in which it is).
+func loadConfig(cfgPath string, client clientset.Interface, skipComponentConfigs bool) (*kubeadmapi.InitConfiguration, bool, error) {
+	// Used for info logs here
+	const logPrefix = "upgrade/config"

+	// The usual case here is to not have a config file, but rather load the config from the cluster.
+	// This is probably 90% of the time. So we handle it first.
+	if cfgPath == "" {
+		cfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, logPrefix, false, skipComponentConfigs)
+		return cfg, false, err
	}

+	// Otherwise, we have a config file. Let's load it.
+	configBytes, err := ioutil.ReadFile(cfgPath)
	if err != nil {
-			return "", err
+		return nil, false, errors.Wrapf(err, "unable to load config from file %q", cfgPath)
	}

-		userVersion = cfg.KubernetesVersion
+	// Split the YAML documents in the file into a DocumentMap
+	docmap, err := kubeadmutil.SplitYAMLDocuments(configBytes)
	if err != nil {
+		return nil, false, err
	}

-	// the version arg is mandatory unless version is specified in the config file
-	if versionIsMandatory && userVersion == "" {
-		if err := cmdutil.ValidateExactArgNumber(args, []string{"version"}); err != nil {
-			return "", err
+	// If there are kubeadm types (API group "kubeadm.k8s.io") present, we need to keep the existing behavior
+	// here. Basically, we have to load all of the configs from the file and none from the cluster. Configs that are
+	// missing from the file will be automatically regenerated by kubeadm even if they are present in the cluster.
+	// The resulting configs overwrite the existing cluster ones at the end of a successful upgrade apply operation.
+	if isKubeadmConfigPresent(docmap) {
+		klog.Warning("WARNING: Usage of the --config flag with kubeadm config types for reconfiguring the cluster during upgrade is not recommended!")
+		cfg, err := configutil.BytesToInitConfiguration(configBytes)
+		return cfg, true, err
	}

+	// If no kubeadm config types are present, we assume that there are manually upgraded component configs in the file.
+	// Hence, we load the kubeadm types from the cluster.
+	initCfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, logPrefix, false, true)
	if err != nil {
+		return nil, false, err
	}

+	// Stop here if the caller does not want us to load the component configs
+	if !skipComponentConfigs {
+		// Load the component configs with upgrades
+		if err := componentconfigs.FetchFromClusterWithLocalOverwrites(&initCfg.ClusterConfiguration, client, docmap); err != nil {
+			return nil, false, err
		}

+		// Now default and validate the configs
+		componentconfigs.Default(&initCfg.ClusterConfiguration, &initCfg.LocalAPIEndpoint, &initCfg.NodeRegistration)
+		if errs := componentconfigs.Validate(&initCfg.ClusterConfiguration); len(errs) != 0 {
+			return nil, false, errs.ToAggregate()
		}
	}

-	// If option was specified in both args and config file, args will overwrite the config file.
-	if len(args) == 1 {
-		userVersion = args[0]
	}

-	return userVersion, nil
+	return initCfg, false, nil
}

// enforceRequirements verifies that it's okay to upgrade and then returns the variables needed for the rest of the procedure
-func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion string) (clientset.Interface, upgrade.VersionGetter, *kubeadmapi.InitConfiguration, error) {
+func enforceRequirements(flags *applyPlanFlags, args []string, dryRun bool, upgradeApply bool) (clientset.Interface, upgrade.VersionGetter, *kubeadmapi.InitConfiguration, error) {
	client, err := getClient(flags.kubeConfigPath, dryRun)
	if err != nil {
		return nil, nil, nil, errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", flags.kubeConfigPath)

@@ -90,14 +135,8 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
	// Fetch the configuration from a file or ConfigMap and validate it
	fmt.Println("[upgrade/config] Making sure the configuration is correct:")

-	var cfg *kubeadmapi.InitConfiguration
-	if flags.cfgPath != "" {
-		klog.Warning("WARNING: Usage of the --config flag for reconfiguring the cluster during upgrade is not recommended!")
-		cfg, err = configutil.LoadInitConfigurationFromFile(flags.cfgPath)
-	} else {
-		cfg, err = configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "upgrade/config", false)
-	}
-
+	var newK8sVersion string
+	cfg, legacyReconfigure, err := loadConfig(flags.cfgPath, client, !upgradeApply)
	if err != nil {
		if apierrors.IsNotFound(err) {
			fmt.Printf("[upgrade/config] In order to upgrade, a ConfigMap called %q in the %s namespace must exist.\n", constants.KubeadmConfigConfigMap, metav1.NamespaceSystem)

@@ -111,6 +150,11 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
			err = errors.Errorf("the ConfigMap %q in the %s namespace used for getting configuration information was not found", constants.KubeadmConfigConfigMap, metav1.NamespaceSystem)
		}
		return nil, nil, nil, errors.Wrap(err, "[upgrade/config] FATAL")
+	} else if legacyReconfigure {
+		// Set the newK8sVersion to the value in the ClusterConfiguration. This is done, so that users who use the --config option
+		// to supply a new ClusterConfiguration don't have to specify the Kubernetes version twice,
+		// if they don't want to upgrade but just change a setting.
+		newK8sVersion = cfg.KubernetesVersion
	}

	ignorePreflightErrorsSet, err := validation.ValidateIgnorePreflightErrors(flags.ignorePreflightErrors, cfg.NodeRegistration.IgnorePreflightErrors)

@@ -131,8 +175,16 @@ func enforceRequirements(flags *applyPlanFlags, dryRun bool, newK8sVersion strin
		return nil, nil, nil, errors.Wrap(err, "[upgrade/health] FATAL")
	}

-	// If a new k8s version should be set, apply the change before printing the config
-	if len(newK8sVersion) != 0 {
+	// The version arg is mandatory, during upgrade apply, unless it's specified in the config file
+	if upgradeApply && newK8sVersion == "" {
+		if err := cmdutil.ValidateExactArgNumber(args, []string{"version"}); err != nil {
+			return nil, nil, nil, err
+		}
+	}
+
+	// If option was specified in both args and config file, args will overwrite the config file.
+	if len(args) == 1 {
+		newK8sVersion = args[0]
		cfg.KubernetesVersion = newK8sVersion
	}

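With these changes enforceRequirements resolves the target Kubernetes version itself instead of receiving it pre-parsed: a version coming from a legacy --config file is used as the starting point, upgrade apply insists on getting a version from somewhere, and a positional argument always overwrites the config file value. A standalone restatement of that precedence; the function and names are illustrative, not kubeadm's:

package main

import (
	"errors"
	"fmt"
)

// resolveVersion mirrors the precedence implemented in enforceRequirements:
// start from whatever a legacy --config file provided, require a version for
// "apply", and let a positional argument overwrite the config file value.
func resolveVersion(cfgVersion string, args []string, upgradeApply bool) (string, error) {
	version := cfgVersion
	if upgradeApply && version == "" && len(args) != 1 {
		return "", errors.New("missing one mandatory argument: version")
	}
	if len(args) == 1 {
		version = args[0] // args overwrite the config file
	}
	return version, nil
}

func main() {
	fmt.Println(resolveVersion("v1.19.0", nil, true))                 // from the config file
	fmt.Println(resolveVersion("v1.19.0", []string{"v1.19.1"}, true)) // the argument wins
	fmt.Println(resolveVersion("", nil, false))                       // plan: version optional
	fmt.Println(resolveVersion("", nil, true))                        // apply: error
}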
@@ -18,98 +18,11 @@ package upgrade

import (
	"bytes"
-	"io/ioutil"
-	"os"
	"testing"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)

-func TestGetK8sVersionFromUserInput(t *testing.T) {
-	currentVersion := "v" + constants.CurrentKubernetesVersion.String()
-	validConfig := "apiVersion: kubeadm.k8s.io/v1beta2\n" +
-		"kind: ClusterConfiguration\n" +
-		"kubernetesVersion: " + currentVersion
-
-	var tcases = []struct {
-		name               string
-		isVersionMandatory bool
-		clusterConfig      string
-		args               []string
-		expectedErr        bool
-		expectedVersion    string
-	}{
-		{
-			name:               "No config and version as an argument",
-			isVersionMandatory: true,
-			args:               []string{"v1.13.1"},
-			expectedVersion:    "v1.13.1",
-		},
-		{
-			name:               "Neither config nor version specified",
-			isVersionMandatory: true,
-			expectedErr:        true,
-		},
-		{
-			name:               "No config and empty version as an argument",
-			isVersionMandatory: true,
-			args:               []string{""},
-			expectedErr:        true,
-		},
-		{
-			name:               "Valid config, but no version specified",
-			isVersionMandatory: true,
-			clusterConfig:      validConfig,
-			expectedVersion:    currentVersion,
-		},
-		{
-			name:               "Valid config and different version specified",
-			isVersionMandatory: true,
-			clusterConfig:      validConfig,
-			args:               []string{"v1.13.1"},
-			expectedVersion:    "v1.13.1",
-		},
-		{
-			name: "Version is optional",
-		},
-	}
-	for _, tt := range tcases {
-		t.Run(tt.name, func(t *testing.T) {
-			flags := &applyPlanFlags{}
-			if len(tt.clusterConfig) > 0 {
-				file, err := ioutil.TempFile("", "kubeadm-upgrade-common-test-*.yaml")
-				if err != nil {
-					t.Fatalf("Failed to create test config file: %+v", err)
-				}
-
-				tmpFileName := file.Name()
-				defer os.Remove(tmpFileName)
-
-				_, err = file.WriteString(tt.clusterConfig)
-				file.Close()
-				if err != nil {
-					t.Fatalf("Failed to write test config file contents: %+v", err)
-				}
-
-				flags.cfgPath = tmpFileName
-			}
-
-			userVersion, err := getK8sVersionFromUserInput(flags, tt.args, tt.isVersionMandatory)
-
-			if err == nil && tt.expectedErr {
-				t.Error("Expected error, but got success")
-			}
-			if err != nil && !tt.expectedErr {
-				t.Errorf("Unexpected error: %+v", err)
-			}
-			if userVersion != tt.expectedVersion {
-				t.Errorf("Expected %q, but got %q", tt.expectedVersion, userVersion)
-			}
-		})
-	}
-}
-
func TestEnforceRequirements(t *testing.T) {
	tcases := []struct {
		name          string

@@ -139,7 +52,7 @@ func TestEnforceRequirements(t *testing.T) {
	}
	for _, tt := range tcases {
		t.Run(tt.name, func(t *testing.T) {
-			_, _, _, err := enforceRequirements(&tt.flags, tt.dryRun, tt.newK8sVersion)
+			_, _, _, err := enforceRequirements(&tt.flags, nil, tt.dryRun, false)

			if err == nil && tt.expectedErr {
				t.Error("Expected error, but got success")

@@ -91,7 +91,7 @@ func runDiff(flags *diffFlags, args []string) error {
		if err != nil {
			return errors.Wrapf(err, "couldn't create a Kubernetes client from file %q", flags.kubeConfigPath)
		}
-		cfg, err = configutil.FetchInitConfigurationFromCluster(client, flags.out, "upgrade/diff", false)
+		cfg, err = configutil.FetchInitConfigurationFromCluster(client, flags.out, "upgrade/diff", false, false)
	}
	if err != nil {
		return err

@@ -141,7 +141,7 @@ func newNodeData(cmd *cobra.Command, args []string, options *nodeOptions) (*node
	// Fetches the cluster configuration
	// NB in case of control-plane node, we are reading all the info for the node; in case of NOT control-plane node
	//    (worker node), we are not reading local API address and the CRI socket from the node object
-	cfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "upgrade", !isControlPlaneNode)
+	cfg, err := configutil.FetchInitConfigurationFromCluster(client, os.Stdout, "upgrade", !isControlPlaneNode, false)
	if err != nil {
		return nil, errors.Wrap(err, "unable to fetch the kubeadm-config ConfigMap")
	}

@@ -48,12 +48,7 @@ func NewCmdPlan(apf *applyPlanFlags) *cobra.Command {
		Use:   "plan [version] [flags]",
		Short: "Check which versions are available to upgrade to and validate whether your current cluster is upgradeable. To skip the internet check, pass in the optional [version] parameter",
		RunE: func(_ *cobra.Command, args []string) error {
-			userVersion, err := getK8sVersionFromUserInput(flags.applyPlanFlags, args, false)
-			if err != nil {
-				return err
-			}
-
-			return runPlan(flags, userVersion)
+			return runPlan(flags, args)
		},
	}

@@ -63,11 +58,11 @@ func NewCmdPlan(apf *applyPlanFlags) *cobra.Command {
}

// runPlan takes care of outputting available versions to upgrade to for the user
-func runPlan(flags *planFlags, userVersion string) error {
+func runPlan(flags *planFlags, args []string) error {
	// Start with the basics, verify that the cluster is healthy, build a client and a versionGetter. Never dry-run when planning.
	klog.V(1).Infoln("[upgrade/plan] verifying health of cluster")
	klog.V(1).Infoln("[upgrade/plan] retrieving configuration from cluster")
-	client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, false, userVersion)
+	client, versionGetter, cfg, err := enforceRequirements(flags.applyPlanFlags, args, false, false)
	if err != nil {
		return err
	}

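NewCmdApply and NewCmdPlan now hand the raw positional arguments straight to runApply/runPlan, and version handling happens later in enforceRequirements. A minimal, hedged example of that cobra wiring — the real github.com/spf13/cobra API, but with a toy command and run function:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func runApply(args []string) error {
	// In kubeadm the args travel on to enforceRequirements, which decides
	// whether a version is required and whether it overrides --config.
	fmt.Println("apply called with args:", args)
	return nil
}

func main() {
	cmd := &cobra.Command{
		Use:   "apply [version]",
		Short: "Toy stand-in for kubeadm upgrade apply",
		RunE: func(_ *cobra.Command, args []string) error {
			return runApply(args) // pass the raw args through, as in the diff
		},
	}
	cmd.SetArgs([]string{"v1.19.1"})
	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err)
	}
}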
@@ -239,6 +239,49 @@ func FetchFromDocumentMap(clusterCfg *kubeadmapi.ClusterConfiguration, docmap ku
	return nil
}

+// FetchFromClusterWithLocalOverwrites fetches component configs from a cluster and overwrites them locally with
+// the ones present in the supplied document map. If any UnsupportedConfigVersionErrors are not handled by the configs
+// in the document map, the function returns them all as a single UnsupportedConfigVersionsErrorMap.
+// This function is normally called only in some specific cases during upgrade.
+func FetchFromClusterWithLocalOverwrites(clusterCfg *kubeadmapi.ClusterConfiguration, client clientset.Interface, docmap kubeadmapi.DocumentMap) error {
+	ensureInitializedComponentConfigs(clusterCfg)

+	oldVersionErrs := UnsupportedConfigVersionsErrorMap{}

+	for _, handler := range known {
+		componentCfg, err := handler.FromCluster(client, clusterCfg)
+		if err != nil {
+			if vererr, ok := err.(*UnsupportedConfigVersionError); ok {
+				oldVersionErrs[handler.GroupVersion.Group] = vererr
+			} else {
+				return err
+			}
+		} else if componentCfg != nil {
+			clusterCfg.ComponentConfigs[handler.GroupVersion.Group] = componentCfg
+		}
+	}

+	for _, handler := range known {
+		componentCfg, err := handler.FromDocumentMap(docmap)
+		if err != nil {
+			if vererr, ok := err.(*UnsupportedConfigVersionError); ok {
+				oldVersionErrs[handler.GroupVersion.Group] = vererr
+			} else {
+				return err
+			}
+		} else if componentCfg != nil {
+			clusterCfg.ComponentConfigs[handler.GroupVersion.Group] = componentCfg
+			delete(oldVersionErrs, handler.GroupVersion.Group)
+		}
+	}

+	if len(oldVersionErrs) != 0 {
+		return oldVersionErrs
+	}

+	return nil
+}

// Validate is a placeholder for performing a validation on an already loaded component configs in a ClusterConfiguration
// Currently it prints a warning that no validation was performed
func Validate(clusterCfg *kubeadmapi.ClusterConfiguration) field.ErrorList {

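FetchFromClusterWithLocalOverwrites, added above, loads every known component config from the cluster first and then lets documents from the user's file overwrite them; an unsupported-version error coming from the cluster is forgiven when the file supplies a usable config for that group, and whatever errors remain are reported together. The following toy model restates that precedence and error-cancellation with plain maps instead of kubeadm's config handlers:

package main

import (
	"errors"
	"fmt"
)

// merge applies the documented precedence: cluster values first, then local
// overwrites; a cluster-side "unsupported version" error survives only if no
// local overwrite covers that group. Everything here is illustrative.
func merge(cluster map[string]string, clusterErrs map[string]error, local map[string]string) (map[string]string, error) {
	out := map[string]string{}
	pending := map[string]error{}
	for group, cfg := range cluster {
		out[group] = cfg
	}
	for group, err := range clusterErrs {
		pending[group] = err
	}
	for group, cfg := range local {
		out[group] = cfg       // local overwrites win
		delete(pending, group) // and cancel an old-version error for that group
	}
	if len(pending) != 0 {
		return nil, errors.New("unsupported config versions remain in the cluster")
	}
	return out, nil
}

func main() {
	cluster := map[string]string{"kubelet.config.k8s.io": "from cluster"}
	clusterErrs := map[string]error{"kubeproxy.config.k8s.io": errors.New("unsupported apiVersion")}
	local := map[string]string{"kubeproxy.config.k8s.io": "hand-migrated"}

	fmt.Println(merge(cluster, clusterErrs, local)) // error cancelled by the overwrite
	fmt.Println(merge(cluster, clusterErrs, nil))   // error is surfaced
}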
@@ -110,3 +110,139 @@ func TestFetchFromDocumentMap(t *testing.T) {
		t.Fatalf("mismatch between supplied and loaded type numbers:\n\tgot: %d\n\texpected: %d", len(clusterCfg.ComponentConfigs), len(gvkmap))
	}
}

+func kubeproxyConfigMap(contents string) *v1.ConfigMap {
+	return &v1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      constants.KubeProxyConfigMap,
+			Namespace: metav1.NamespaceSystem,
+		},
+		Data: map[string]string{
+			constants.KubeProxyConfigMapKey: dedent.Dedent(contents),
+		},
+	}
+}

+func TestFetchFromClusterWithLocalUpgrades(t *testing.T) {
+	cases := []struct {
+		desc          string
+		obj           runtime.Object
+		config        string
+		expectedValue string
+		expectedErr   bool
+	}{
+		{
+			desc: "recognized cluster object without overwrite is used",
+			obj: kubeproxyConfigMap(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha1
+				kind: KubeProxyConfiguration
+				hostnameOverride: foo
+			`),
+			expectedValue: "foo",
+		},
+		{
+			desc: "recognized cluster object with overwrite is not used",
+			obj: kubeproxyConfigMap(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha1
+				kind: KubeProxyConfiguration
+				hostnameOverride: foo
+			`),
+			config: dedent.Dedent(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha1
+				kind: KubeProxyConfiguration
+				hostnameOverride: bar
+			`),
+			expectedValue: "bar",
+		},
+		{
+			desc: "old config without overwrite returns an error",
+			obj: kubeproxyConfigMap(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha0
+				kind: KubeProxyConfiguration
+				hostnameOverride: foo
+			`),
+			expectedErr: true,
+		},
+		{
+			desc: "old config with recognized overwrite returns success",
+			obj: kubeproxyConfigMap(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha0
+				kind: KubeProxyConfiguration
+				hostnameOverride: foo
+			`),
+			config: dedent.Dedent(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha1
+				kind: KubeProxyConfiguration
+				hostnameOverride: bar
+			`),
+			expectedValue: "bar",
+		},
+		{
+			desc: "old config with old overwrite returns an error",
+			obj: kubeproxyConfigMap(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha0
+				kind: KubeProxyConfiguration
+				hostnameOverride: foo
+			`),
+			config: dedent.Dedent(`
+				apiVersion: kubeproxy.config.k8s.io/v1alpha0
+				kind: KubeProxyConfiguration
+				hostnameOverride: bar
+			`),
+			expectedErr: true,
+		},
+	}
+	for _, test := range cases {
+		t.Run(test.desc, func(t *testing.T) {
+			clusterCfg := &kubeadmapi.ClusterConfiguration{
+				KubernetesVersion: constants.CurrentKubernetesVersion.String(),
+			}

+			k8sVersion := version.MustParseGeneric(clusterCfg.KubernetesVersion)

+			client := clientsetfake.NewSimpleClientset(
+				test.obj,
+				&v1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      constants.GetKubeletConfigMapName(k8sVersion),
+						Namespace: metav1.NamespaceSystem,
+					},
+					Data: map[string]string{
+						constants.KubeletBaseConfigurationConfigMapKey: dedent.Dedent(`
+							apiVersion: kubelet.config.k8s.io/v1beta1
+							kind: KubeletConfiguration
+						`),
+					},
+				},
+			)

+			docmap, err := kubeadmutil.SplitYAMLDocuments([]byte(test.config))
+			if err != nil {
+				t.Fatalf("unexpected failure of SplitYAMLDocuments: %v", err)
+			}

+			err = FetchFromClusterWithLocalOverwrites(clusterCfg, client, docmap)
+			if err != nil {
+				if !test.expectedErr {
+					t.Errorf("unexpected failure: %v", err)
+				}
+			} else {
+				if test.expectedErr {
+					t.Error("unexpected success")
+				} else {
+					kubeproxyCfg, ok := clusterCfg.ComponentConfigs[KubeProxyGroup]
+					if !ok {
+						t.Error("the config was reported as loaded, but was not in reality")
+					} else {
+						actualConfig, ok := kubeproxyCfg.(*kubeProxyConfig)
+						if !ok {
+							t.Error("the config is not of the expected type")
+						} else if actualConfig.config.HostnameOverride != test.expectedValue {
+							t.Errorf("unexpected value:\n\tgot: %q\n\texpected: %q", actualConfig.config.HostnameOverride, test.expectedValue)
+						}
+					}
+				}
+			}
+		})
+	}
+}

@@ -18,6 +18,8 @@ package componentconfigs

import (
	"fmt"
+	"sort"
+	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog/v2"

@@ -40,6 +42,27 @@ func (err *UnsupportedConfigVersionError) Error() string {
	return fmt.Sprintf("unsupported apiVersion %q, you may have to do manual conversion to %q and run kubeadm again", err.OldVersion, err.CurrentVersion)
}

+// UnsupportedConfigVersionsErrorMap is a cumulative version of the UnsupportedConfigVersionError type
+type UnsupportedConfigVersionsErrorMap map[string]*UnsupportedConfigVersionError

+// Error implements the standard Golang error interface for UnsupportedConfigVersionsErrorMap
+func (errs UnsupportedConfigVersionsErrorMap) Error() string {
+	// Make sure the error messages we print are predictable by sorting them by the group names involved
+	groups := make([]string, 0, len(errs))
+	for group := range errs {
+		groups = append(groups, group)
+	}
+	sort.Strings(groups)

+	msgs := make([]string, 1, 1+len(errs))
+	msgs[0] = "multiple unsupported config version errors encountered:"
+	for _, group := range groups {
+		msgs = append(msgs, errs[group].Error())
+	}

+	return strings.Join(msgs, "\n\t- ")
+}

// warnDefaultComponentConfigValue prints a warning if the user modified a field in a certain
// ComponentConfig from the default recommended value in kubeadm.
func warnDefaultComponentConfigValue(componentConfigKind, paramName string, defaultValue, userValue interface{}) {

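UnsupportedConfigVersionsErrorMap collects one UnsupportedConfigVersionError per API group and renders them as a single message in a deterministic order (groups sorted, entries joined with "\n\t- "). The same formatting pattern, reduced to the standard library with plain strings standing in for the real error type:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// groupErrors maps an API group to its error message, like the new
// UnsupportedConfigVersionsErrorMap (simplified to plain strings here).
type groupErrors map[string]string

// Error joins the per-group messages in sorted group order so the output is
// stable regardless of map iteration order.
func (e groupErrors) Error() string {
	groups := make([]string, 0, len(e))
	for g := range e {
		groups = append(groups, g)
	}
	sort.Strings(groups)

	msgs := make([]string, 1, 1+len(e))
	msgs[0] = "multiple unsupported config version errors encountered:"
	for _, g := range groups {
		msgs = append(msgs, e[g])
	}
	return strings.Join(msgs, "\n\t- ")
}

func main() {
	var err error = groupErrors{
		"kubeproxy.config.k8s.io": `unsupported apiVersion "kubeproxy.config.k8s.io/v1alpha0"`,
		"kubelet.config.k8s.io":   `unsupported apiVersion "kubelet.config.k8s.io/v1alpha1"`,
	}
	fmt.Println(err)
}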
@@ -61,12 +61,12 @@ func (ue *unretriableError) Error() string {
}

// FetchInitConfigurationFromCluster fetches configuration from a ConfigMap in the cluster
-func FetchInitConfigurationFromCluster(client clientset.Interface, w io.Writer, logPrefix string, newControlPlane bool) (*kubeadmapi.InitConfiguration, error) {
+func FetchInitConfigurationFromCluster(client clientset.Interface, w io.Writer, logPrefix string, newControlPlane, skipComponentConfigs bool) (*kubeadmapi.InitConfiguration, error) {
	fmt.Fprintf(w, "[%s] Reading configuration from the cluster...\n", logPrefix)
	fmt.Fprintf(w, "[%s] FYI: You can look at this config file with 'kubectl -n %s get cm %s -oyaml'\n", logPrefix, metav1.NamespaceSystem, constants.KubeadmConfigConfigMap)

	// Fetch the actual config from cluster
-	cfg, err := getInitConfigurationFromCluster(constants.KubernetesDir, client, newControlPlane)
+	cfg, err := getInitConfigurationFromCluster(constants.KubernetesDir, client, newControlPlane, skipComponentConfigs)
	if err != nil {
		return nil, err
	}

@@ -80,7 +80,7 @@ func FetchInitConfigurationFromCluster(client clientset.Interface, w io.Writer,
}

// getInitConfigurationFromCluster is separate only for testing purposes, don't call it directly, use FetchInitConfigurationFromCluster instead
-func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Interface, newControlPlane bool) (*kubeadmapi.InitConfiguration, error) {
+func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Interface, newControlPlane, skipComponentConfigs bool) (*kubeadmapi.InitConfiguration, error) {
	// Also, the config map really should be KubeadmConfigConfigMap...
	configMap, err := apiclient.GetConfigMapWithRetry(client, metav1.NamespaceSystem, constants.KubeadmConfigConfigMap)
	if err != nil {

@@ -99,10 +99,12 @@ func getInitConfigurationFromCluster(kubeconfigDir string, client clientset.Inte
		return nil, errors.Wrap(err, "failed to decode cluster configuration data")
	}

-	// gets the component configs from the corresponding config maps
+	if !skipComponentConfigs {
+		// get the component configs from the corresponding config maps
		if err := componentconfigs.FetchFromCluster(&initcfg.ClusterConfiguration, client); err != nil {
			return nil, errors.Wrap(err, "failed to get component configs")
		}
+	}

	// if this isn't a new controlplane instance (e.g. in case of kubeadm upgrades)
	// get nodes specific information as well

@@ -736,7 +736,7 @@ func TestGetInitConfigurationFromCluster(t *testing.T) {
				}
			}

-			cfg, err := getInitConfigurationFromCluster(tmpdir, client, rt.newControlPlane)
+			cfg, err := getInitConfigurationFromCluster(tmpdir, client, rt.newControlPlane, false)
			if rt.expectedError != (err != nil) {
				t.Errorf("unexpected return err from getInitConfigurationFromCluster: %v", err)
				return