mirror of
				https://github.com/optim-enterprises-bv/kubernetes.git
				synced 2025-11-04 04:08:16 +00:00 
			
		
		
		
	Merge pull request #122857 from nilo19/chore/cleanup-azure
chore: Cleanup in-tree credential provider azure and cloud provider a…
This commit is contained in:
		
							
								
								
									
										25
									
								
								LICENSES/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										25
									
								
								LICENSES/vendor/github.com/Azure/azure-sdk-for-go/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,25 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/azure-sdk-for-go licensed under: =
 | 
			
		||||
 | 
			
		||||
The MIT License (MIT)
 | 
			
		||||
 | 
			
		||||
Copyright (c) Microsoft Corporation.
 | 
			
		||||
 | 
			
		||||
Permission is hereby granted, free of charge, to any person obtaining a copy
 | 
			
		||||
of this software and associated documentation files (the "Software"), to deal
 | 
			
		||||
in the Software without restriction, including without limitation the rights
 | 
			
		||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 | 
			
		||||
copies of the Software, and to permit persons to whom the Software is
 | 
			
		||||
furnished to do so, subject to the following conditions:
 | 
			
		||||
 | 
			
		||||
The above copyright notice and this permission notice shall be included in all
 | 
			
		||||
copies or substantial portions of the Software.
 | 
			
		||||
 | 
			
		||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 | 
			
		||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 | 
			
		||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 | 
			
		||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 | 
			
		||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 | 
			
		||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 | 
			
		||||
SOFTWARE.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/azure-sdk-for-go/LICENSE.txt 4f7454c9bcbb0acee6d9a971001befe2
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/autorest licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/autorest/adal licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/autorest/date licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/mocks/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/mocks/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/autorest/mocks licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/to/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/autorest/to licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/autorest/validation/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/autorest/validation licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/logger/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/logger/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/logger licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/tracing/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										195
									
								
								LICENSES/vendor/github.com/Azure/go-autorest/tracing/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,195 +0,0 @@
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/tracing licensed under: =
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
                                 Apache License
 | 
			
		||||
                           Version 2.0, January 2004
 | 
			
		||||
                        http://www.apache.org/licenses/
 | 
			
		||||
 | 
			
		||||
   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
 | 
			
		||||
 | 
			
		||||
   1. Definitions.
 | 
			
		||||
 | 
			
		||||
      "License" shall mean the terms and conditions for use, reproduction,
 | 
			
		||||
      and distribution as defined by Sections 1 through 9 of this document.
 | 
			
		||||
 | 
			
		||||
      "Licensor" shall mean the copyright owner or entity authorized by
 | 
			
		||||
      the copyright owner that is granting the License.
 | 
			
		||||
 | 
			
		||||
      "Legal Entity" shall mean the union of the acting entity and all
 | 
			
		||||
      other entities that control, are controlled by, or are under common
 | 
			
		||||
      control with that entity. For the purposes of this definition,
 | 
			
		||||
      "control" means (i) the power, direct or indirect, to cause the
 | 
			
		||||
      direction or management of such entity, whether by contract or
 | 
			
		||||
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
 | 
			
		||||
      outstanding shares, or (iii) beneficial ownership of such entity.
 | 
			
		||||
 | 
			
		||||
      "You" (or "Your") shall mean an individual or Legal Entity
 | 
			
		||||
      exercising permissions granted by this License.
 | 
			
		||||
 | 
			
		||||
      "Source" form shall mean the preferred form for making modifications,
 | 
			
		||||
      including but not limited to software source code, documentation
 | 
			
		||||
      source, and configuration files.
 | 
			
		||||
 | 
			
		||||
      "Object" form shall mean any form resulting from mechanical
 | 
			
		||||
      transformation or translation of a Source form, including but
 | 
			
		||||
      not limited to compiled object code, generated documentation,
 | 
			
		||||
      and conversions to other media types.
 | 
			
		||||
 | 
			
		||||
      "Work" shall mean the work of authorship, whether in Source or
 | 
			
		||||
      Object form, made available under the License, as indicated by a
 | 
			
		||||
      copyright notice that is included in or attached to the work
 | 
			
		||||
      (an example is provided in the Appendix below).
 | 
			
		||||
 | 
			
		||||
      "Derivative Works" shall mean any work, whether in Source or Object
 | 
			
		||||
      form, that is based on (or derived from) the Work and for which the
 | 
			
		||||
      editorial revisions, annotations, elaborations, or other modifications
 | 
			
		||||
      represent, as a whole, an original work of authorship. For the purposes
 | 
			
		||||
      of this License, Derivative Works shall not include works that remain
 | 
			
		||||
      separable from, or merely link (or bind by name) to the interfaces of,
 | 
			
		||||
      the Work and Derivative Works thereof.
 | 
			
		||||
 | 
			
		||||
      "Contribution" shall mean any work of authorship, including
 | 
			
		||||
      the original version of the Work and any modifications or additions
 | 
			
		||||
      to that Work or Derivative Works thereof, that is intentionally
 | 
			
		||||
      submitted to Licensor for inclusion in the Work by the copyright owner
 | 
			
		||||
      or by an individual or Legal Entity authorized to submit on behalf of
 | 
			
		||||
      the copyright owner. For the purposes of this definition, "submitted"
 | 
			
		||||
      means any form of electronic, verbal, or written communication sent
 | 
			
		||||
      to the Licensor or its representatives, including but not limited to
 | 
			
		||||
      communication on electronic mailing lists, source code control systems,
 | 
			
		||||
      and issue tracking systems that are managed by, or on behalf of, the
 | 
			
		||||
      Licensor for the purpose of discussing and improving the Work, but
 | 
			
		||||
      excluding communication that is conspicuously marked or otherwise
 | 
			
		||||
      designated in writing by the copyright owner as "Not a Contribution."
 | 
			
		||||
 | 
			
		||||
      "Contributor" shall mean Licensor and any individual or Legal Entity
 | 
			
		||||
      on behalf of whom a Contribution has been received by Licensor and
 | 
			
		||||
      subsequently incorporated within the Work.
 | 
			
		||||
 | 
			
		||||
   2. Grant of Copyright License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      copyright license to reproduce, prepare Derivative Works of,
 | 
			
		||||
      publicly display, publicly perform, sublicense, and distribute the
 | 
			
		||||
      Work and such Derivative Works in Source or Object form.
 | 
			
		||||
 | 
			
		||||
   3. Grant of Patent License. Subject to the terms and conditions of
 | 
			
		||||
      this License, each Contributor hereby grants to You a perpetual,
 | 
			
		||||
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
 | 
			
		||||
      (except as stated in this section) patent license to make, have made,
 | 
			
		||||
      use, offer to sell, sell, import, and otherwise transfer the Work,
 | 
			
		||||
      where such license applies only to those patent claims licensable
 | 
			
		||||
      by such Contributor that are necessarily infringed by their
 | 
			
		||||
      Contribution(s) alone or by combination of their Contribution(s)
 | 
			
		||||
      with the Work to which such Contribution(s) was submitted. If You
 | 
			
		||||
      institute patent litigation against any entity (including a
 | 
			
		||||
      cross-claim or counterclaim in a lawsuit) alleging that the Work
 | 
			
		||||
      or a Contribution incorporated within the Work constitutes direct
 | 
			
		||||
      or contributory patent infringement, then any patent licenses
 | 
			
		||||
      granted to You under this License for that Work shall terminate
 | 
			
		||||
      as of the date such litigation is filed.
 | 
			
		||||
 | 
			
		||||
   4. Redistribution. You may reproduce and distribute copies of the
 | 
			
		||||
      Work or Derivative Works thereof in any medium, with or without
 | 
			
		||||
      modifications, and in Source or Object form, provided that You
 | 
			
		||||
      meet the following conditions:
 | 
			
		||||
 | 
			
		||||
      (a) You must give any other recipients of the Work or
 | 
			
		||||
          Derivative Works a copy of this License; and
 | 
			
		||||
 | 
			
		||||
      (b) You must cause any modified files to carry prominent notices
 | 
			
		||||
          stating that You changed the files; and
 | 
			
		||||
 | 
			
		||||
      (c) You must retain, in the Source form of any Derivative Works
 | 
			
		||||
          that You distribute, all copyright, patent, trademark, and
 | 
			
		||||
          attribution notices from the Source form of the Work,
 | 
			
		||||
          excluding those notices that do not pertain to any part of
 | 
			
		||||
          the Derivative Works; and
 | 
			
		||||
 | 
			
		||||
      (d) If the Work includes a "NOTICE" text file as part of its
 | 
			
		||||
          distribution, then any Derivative Works that You distribute must
 | 
			
		||||
          include a readable copy of the attribution notices contained
 | 
			
		||||
          within such NOTICE file, excluding those notices that do not
 | 
			
		||||
          pertain to any part of the Derivative Works, in at least one
 | 
			
		||||
          of the following places: within a NOTICE text file distributed
 | 
			
		||||
          as part of the Derivative Works; within the Source form or
 | 
			
		||||
          documentation, if provided along with the Derivative Works; or,
 | 
			
		||||
          within a display generated by the Derivative Works, if and
 | 
			
		||||
          wherever such third-party notices normally appear. The contents
 | 
			
		||||
          of the NOTICE file are for informational purposes only and
 | 
			
		||||
          do not modify the License. You may add Your own attribution
 | 
			
		||||
          notices within Derivative Works that You distribute, alongside
 | 
			
		||||
          or as an addendum to the NOTICE text from the Work, provided
 | 
			
		||||
          that such additional attribution notices cannot be construed
 | 
			
		||||
          as modifying the License.
 | 
			
		||||
 | 
			
		||||
      You may add Your own copyright statement to Your modifications and
 | 
			
		||||
      may provide additional or different license terms and conditions
 | 
			
		||||
      for use, reproduction, or distribution of Your modifications, or
 | 
			
		||||
      for any such Derivative Works as a whole, provided Your use,
 | 
			
		||||
      reproduction, and distribution of the Work otherwise complies with
 | 
			
		||||
      the conditions stated in this License.
 | 
			
		||||
 | 
			
		||||
   5. Submission of Contributions. Unless You explicitly state otherwise,
 | 
			
		||||
      any Contribution intentionally submitted for inclusion in the Work
 | 
			
		||||
      by You to the Licensor shall be under the terms and conditions of
 | 
			
		||||
      this License, without any additional terms or conditions.
 | 
			
		||||
      Notwithstanding the above, nothing herein shall supersede or modify
 | 
			
		||||
      the terms of any separate license agreement you may have executed
 | 
			
		||||
      with Licensor regarding such Contributions.
 | 
			
		||||
 | 
			
		||||
   6. Trademarks. This License does not grant permission to use the trade
 | 
			
		||||
      names, trademarks, service marks, or product names of the Licensor,
 | 
			
		||||
      except as required for reasonable and customary use in describing the
 | 
			
		||||
      origin of the Work and reproducing the content of the NOTICE file.
 | 
			
		||||
 | 
			
		||||
   7. Disclaimer of Warranty. Unless required by applicable law or
 | 
			
		||||
      agreed to in writing, Licensor provides the Work (and each
 | 
			
		||||
      Contributor provides its Contributions) on an "AS IS" BASIS,
 | 
			
		||||
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 | 
			
		||||
      implied, including, without limitation, any warranties or conditions
 | 
			
		||||
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
 | 
			
		||||
      PARTICULAR PURPOSE. You are solely responsible for determining the
 | 
			
		||||
      appropriateness of using or redistributing the Work and assume any
 | 
			
		||||
      risks associated with Your exercise of permissions under this License.
 | 
			
		||||
 | 
			
		||||
   8. Limitation of Liability. In no event and under no legal theory,
 | 
			
		||||
      whether in tort (including negligence), contract, or otherwise,
 | 
			
		||||
      unless required by applicable law (such as deliberate and grossly
 | 
			
		||||
      negligent acts) or agreed to in writing, shall any Contributor be
 | 
			
		||||
      liable to You for damages, including any direct, indirect, special,
 | 
			
		||||
      incidental, or consequential damages of any character arising as a
 | 
			
		||||
      result of this License or out of the use or inability to use the
 | 
			
		||||
      Work (including but not limited to damages for loss of goodwill,
 | 
			
		||||
      work stoppage, computer failure or malfunction, or any and all
 | 
			
		||||
      other commercial damages or losses), even if such Contributor
 | 
			
		||||
      has been advised of the possibility of such damages.
 | 
			
		||||
 | 
			
		||||
   9. Accepting Warranty or Additional Liability. While redistributing
 | 
			
		||||
      the Work or Derivative Works thereof, You may choose to offer,
 | 
			
		||||
      and charge a fee for, acceptance of support, warranty, indemnity,
 | 
			
		||||
      or other liability obligations and/or rights consistent with this
 | 
			
		||||
      License. However, in accepting such obligations, You may act only
 | 
			
		||||
      on Your own behalf and on Your sole responsibility, not on behalf
 | 
			
		||||
      of any other Contributor, and only if You agree to indemnify,
 | 
			
		||||
      defend, and hold each Contributor harmless for any liability
 | 
			
		||||
      incurred by, or claims asserted against, such Contributor by reason
 | 
			
		||||
      of your accepting any such warranty or additional liability.
 | 
			
		||||
 | 
			
		||||
   END OF TERMS AND CONDITIONS
 | 
			
		||||
 | 
			
		||||
   Copyright 2015 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
   Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
   you may not use this file except in compliance with the License.
 | 
			
		||||
   You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
       http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
   Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
   distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
   See the License for the specific language governing permissions and
 | 
			
		||||
   limitations under the License.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/Azure/go-autorest/LICENSE a250e5ac3848f2acadb5adcb9555c18b
 | 
			
		||||
							
								
								
									
										24
									
								
								LICENSES/vendor/github.com/gofrs/uuid/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										24
									
								
								LICENSES/vendor/github.com/gofrs/uuid/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,24 +0,0 @@
 | 
			
		||||
= vendor/github.com/gofrs/uuid licensed under: =
 | 
			
		||||
 | 
			
		||||
Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
 | 
			
		||||
 | 
			
		||||
Permission is hereby granted, free of charge, to any person obtaining
 | 
			
		||||
a copy of this software and associated documentation files (the
 | 
			
		||||
"Software"), to deal in the Software without restriction, including
 | 
			
		||||
without limitation the rights to use, copy, modify, merge, publish,
 | 
			
		||||
distribute, sublicense, and/or sell copies of the Software, and to
 | 
			
		||||
permit persons to whom the Software is furnished to do so, subject to
 | 
			
		||||
the following conditions:
 | 
			
		||||
 | 
			
		||||
The above copyright notice and this permission notice shall be
 | 
			
		||||
included in all copies or substantial portions of the Software.
 | 
			
		||||
 | 
			
		||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 | 
			
		||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 | 
			
		||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 | 
			
		||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 | 
			
		||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 | 
			
		||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 | 
			
		||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/gofrs/uuid/LICENSE ae4ba217c6e20c2d8f48f69966b9121b
 | 
			
		||||
							
								
								
									
										26
									
								
								LICENSES/vendor/github.com/rubiojr/go-vhd/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							
							
						
						
									
										26
									
								
								LICENSES/vendor/github.com/rubiojr/go-vhd/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
								
							@@ -1,26 +0,0 @@
 | 
			
		||||
= vendor/github.com/rubiojr/go-vhd licensed under: =
 | 
			
		||||
 | 
			
		||||
The MIT License (MIT)
 | 
			
		||||
 | 
			
		||||
Copyright (c) 2015 Sergio Rubio
 | 
			
		||||
 | 
			
		||||
Permission is hereby granted, free of charge, to any person obtaining a copy
 | 
			
		||||
of this software and associated documentation files (the "Software"), to deal
 | 
			
		||||
in the Software without restriction, including without limitation the rights
 | 
			
		||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 | 
			
		||||
copies of the Software, and to permit persons to whom the Software is
 | 
			
		||||
furnished to do so, subject to the following conditions:
 | 
			
		||||
 | 
			
		||||
The above copyright notice and this permission notice shall be included in all
 | 
			
		||||
copies or substantial portions of the Software.
 | 
			
		||||
 | 
			
		||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 | 
			
		||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 | 
			
		||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 | 
			
		||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 | 
			
		||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 | 
			
		||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 | 
			
		||||
SOFTWARE.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
= vendor/github.com/rubiojr/go-vhd/LICENSE 9ce5db55ba47444787183e59733e1977
 | 
			
		||||
@@ -25,7 +25,6 @@ package main
 | 
			
		||||
import (
 | 
			
		||||
	// NOTE: Importing all in-tree cloud-providers is not required when
 | 
			
		||||
	// implementing an out-of-tree cloud-provider.
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/azure"
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/gce"
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/vsphere"
 | 
			
		||||
)
 | 
			
		||||
 
 | 
			
		||||
@@ -29,7 +29,6 @@ import (
 | 
			
		||||
	"k8s.io/component-base/version/verflag"
 | 
			
		||||
 | 
			
		||||
	// ensure libs have a chance to globally register their flags
 | 
			
		||||
	_ "k8s.io/kubernetes/pkg/credentialprovider/azure"
 | 
			
		||||
	_ "k8s.io/kubernetes/pkg/credentialprovider/gcp"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
@@ -60,16 +59,6 @@ func register(global *flag.FlagSet, local *pflag.FlagSet, globalName string) {
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// pflagRegister adds a flag to local that targets the Value associated with the Flag named globalName in global
 | 
			
		||||
func pflagRegister(global, local *pflag.FlagSet, globalName string) {
 | 
			
		||||
	if f := global.Lookup(globalName); f != nil {
 | 
			
		||||
		f.Name = normalize(f.Name)
 | 
			
		||||
		local.AddFlag(f)
 | 
			
		||||
	} else {
 | 
			
		||||
		panic(fmt.Sprintf("failed to find flag in global flagset (pflag): %s", globalName))
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// registerDeprecated registers the flag with register, and then marks it deprecated
 | 
			
		||||
func registerDeprecated(global *flag.FlagSet, local *pflag.FlagSet, globalName, deprecated string) {
 | 
			
		||||
	register(global, local, globalName)
 | 
			
		||||
@@ -79,10 +68,7 @@ func registerDeprecated(global *flag.FlagSet, local *pflag.FlagSet, globalName,
 | 
			
		||||
// addCredentialProviderFlags adds flags from k8s.io/kubernetes/pkg/credentialprovider
 | 
			
		||||
func addCredentialProviderFlags(fs *pflag.FlagSet) {
 | 
			
		||||
	// lookup flags in global flag set and re-register the values with our flagset
 | 
			
		||||
	global := pflag.CommandLine
 | 
			
		||||
	local := pflag.NewFlagSet(os.Args[0], pflag.ExitOnError)
 | 
			
		||||
 | 
			
		||||
	addLegacyCloudProviderCredentialProviderFlags(global, local)
 | 
			
		||||
 | 
			
		||||
	fs.AddFlagSet(local)
 | 
			
		||||
}
 | 
			
		||||
 
 | 
			
		||||
@@ -1,31 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package options
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/spf13/pflag"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func addLegacyCloudProviderCredentialProviderFlags(global, local *pflag.FlagSet) {
 | 
			
		||||
	// TODO(#58034): This is not a static file, so it's not quite as straightforward as --google-json-key.
 | 
			
		||||
	// We need to figure out how ACR users can dynamically provide pull credentials before we can deprecate this.
 | 
			
		||||
	pflagRegister(global, local, "azure-container-registry-config")
 | 
			
		||||
	local.MarkDeprecated("azure-container-registry-config", "Use --image-credential-provider-config and --image-credential-provider-bin-dir to setup acr credential provider instead. Will be removed in a future release.")
 | 
			
		||||
}
 | 
			
		||||
@@ -21,7 +21,6 @@ package app
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	// Credential providers
 | 
			
		||||
	_ "k8s.io/kubernetes/pkg/credentialprovider/azure"
 | 
			
		||||
	_ "k8s.io/kubernetes/pkg/credentialprovider/gcp"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/component-base/featuregate"
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										12
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										12
									
								
								go.mod
									
									
									
									
									
								
							@@ -10,9 +10,6 @@ go 1.21
 | 
			
		||||
 | 
			
		||||
require (
 | 
			
		||||
	bitbucket.org/bertimus9/systemstat v0.5.0
 | 
			
		||||
	github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
 | 
			
		||||
	github.com/Azure/go-autorest/autorest v0.11.29
 | 
			
		||||
	github.com/Azure/go-autorest/autorest/adal v0.9.23
 | 
			
		||||
	github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b
 | 
			
		||||
	github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab
 | 
			
		||||
	github.com/Microsoft/go-winio v0.6.0
 | 
			
		||||
@@ -135,13 +132,6 @@ require (
 | 
			
		||||
	cloud.google.com/go/compute v1.23.0 // indirect
 | 
			
		||||
	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 | 
			
		||||
	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 | 
			
		||||
	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 | 
			
		||||
	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 | 
			
		||||
	github.com/Azure/go-autorest/autorest/mocks v0.4.2 // indirect
 | 
			
		||||
	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 | 
			
		||||
	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
 | 
			
		||||
	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 | 
			
		||||
	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 | 
			
		||||
	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 | 
			
		||||
	github.com/NYTimes/gziphandler v1.1.1 // indirect
 | 
			
		||||
	github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
 | 
			
		||||
@@ -171,7 +161,6 @@ require (
 | 
			
		||||
	github.com/go-openapi/jsonreference v0.20.2 // indirect
 | 
			
		||||
	github.com/go-openapi/swag v0.22.3 // indirect
 | 
			
		||||
	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 | 
			
		||||
	github.com/gofrs/uuid v4.4.0+incompatible // indirect
 | 
			
		||||
	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 | 
			
		||||
	github.com/google/btree v1.0.1 // indirect
 | 
			
		||||
	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
 | 
			
		||||
@@ -209,7 +198,6 @@ require (
 | 
			
		||||
	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 | 
			
		||||
	github.com/pquerna/cachecontrol v0.1.0 // indirect
 | 
			
		||||
	github.com/prometheus/procfs v0.10.1 // indirect
 | 
			
		||||
	github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 // indirect
 | 
			
		||||
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 | 
			
		||||
	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
 | 
			
		||||
	github.com/sirupsen/logrus v1.9.0 // indirect
 | 
			
		||||
 
 | 
			
		||||
							
								
								
									
										45
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										45
									
								
								go.sum
									
									
									
									
									
								
							@@ -166,30 +166,8 @@ cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4m
 | 
			
		||||
cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg=
 | 
			
		||||
cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g=
 | 
			
		||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 | 
			
		||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
 | 
			
		||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 | 
			
		||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 | 
			
		||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 | 
			
		||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 | 
			
		||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 | 
			
		||||
github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
 | 
			
		||||
github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
 | 
			
		||||
github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
 | 
			
		||||
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
 | 
			
		||||
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 | 
			
		||||
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 | 
			
		||||
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 | 
			
		||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 | 
			
		||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 | 
			
		||||
github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b h1:Heo1J/ttaQFgGJSVnCZquy3e5eH5j1nqxBuomztB3P0=
 | 
			
		||||
@@ -317,8 +295,6 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
 | 
			
		||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 | 
			
		||||
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 | 
			
		||||
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 | 
			
		||||
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 | 
			
		||||
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
 | 
			
		||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
 | 
			
		||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 | 
			
		||||
github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE=
 | 
			
		||||
@@ -399,14 +375,11 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x
 | 
			
		||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 | 
			
		||||
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 | 
			
		||||
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 | 
			
		||||
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
 | 
			
		||||
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 | 
			
		||||
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
 | 
			
		||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 | 
			
		||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 | 
			
		||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 | 
			
		||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 | 
			
		||||
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 | 
			
		||||
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
 | 
			
		||||
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 | 
			
		||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 | 
			
		||||
@@ -708,8 +681,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
 | 
			
		||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 | 
			
		||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 | 
			
		||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 | 
			
		||||
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021 h1:if3/24+h9Sq6eDx8UUz1SO9cT9tizyIsATfB7b4D3tc=
 | 
			
		||||
github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
 | 
			
		||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 | 
			
		||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 | 
			
		||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 | 
			
		||||
@@ -760,7 +731,6 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 | 
			
		||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 | 
			
		||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 | 
			
		||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 | 
			
		||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 | 
			
		||||
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 | 
			
		||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 | 
			
		||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 | 
			
		||||
@@ -860,9 +830,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
 | 
			
		||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 | 
			
		||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 | 
			
		||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 | 
			
		||||
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
 | 
			
		||||
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 | 
			
		||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 | 
			
		||||
@@ -902,7 +869,6 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 | 
			
		||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 | 
			
		||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 | 
			
		||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 | 
			
		||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 | 
			
		||||
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
 | 
			
		||||
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 | 
			
		||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 | 
			
		||||
@@ -946,10 +912,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 | 
			
		||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
 | 
			
		||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 | 
			
		||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 | 
			
		||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 | 
			
		||||
golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 | 
			
		||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 | 
			
		||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 | 
			
		||||
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
 | 
			
		||||
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
 | 
			
		||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 | 
			
		||||
@@ -981,7 +944,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 | 
			
		||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 | 
			
		||||
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
 | 
			
		||||
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 | 
			
		||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 | 
			
		||||
@@ -1045,15 +1007,11 @@ golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBc
 | 
			
		||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 | 
			
		||||
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
 | 
			
		||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 | 
			
		||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 | 
			
		||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 | 
			
		||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 | 
			
		||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 | 
			
		||||
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
 | 
			
		||||
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
 | 
			
		||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 | 
			
		||||
@@ -1064,8 +1022,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 | 
			
		||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 | 
			
		||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 | 
			
		||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 | 
			
		||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 | 
			
		||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 | 
			
		||||
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 | 
			
		||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 | 
			
		||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 | 
			
		||||
@@ -1131,7 +1087,6 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 | 
			
		||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 | 
			
		||||
golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA=
 | 
			
		||||
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
 | 
			
		||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 | 
			
		||||
 
 | 
			
		||||
@@ -20,7 +20,6 @@
 | 
			
		||||
      "github.com/go-openapi/validate": "use k8s.io/kube-openapi/pkg/validation/validate instead",
 | 
			
		||||
      "github.com/gogo/googleapis": "depends on unmaintained github.com/gogo/protobuf",
 | 
			
		||||
      "github.com/gogo/protobuf": "unmaintained",
 | 
			
		||||
      "github.com/golang/mock": "unmaintained, archive mode",
 | 
			
		||||
      "github.com/google/s2a-go": "cloud dependency, unstable",
 | 
			
		||||
      "github.com/google/shlex": "unmaintained, archive mode",
 | 
			
		||||
      "github.com/googleapis/enterprise-certificate-proxy": "references cloud dependencies",
 | 
			
		||||
@@ -55,7 +54,6 @@
 | 
			
		||||
      "github.com/mndrix/tap-go": "unmaintained",
 | 
			
		||||
      "github.com/onsi/ginkgo": "Ginkgo has been migrated to V2, refer to #109111",
 | 
			
		||||
      "github.com/pkg/errors": "unmaintained, archive mode",
 | 
			
		||||
      "github.com/rubiojr/go-vhd": "unmaintained, archive mode",
 | 
			
		||||
      "github.com/smartystreets/goconvey": "MPL license not in CNCF allowlist",
 | 
			
		||||
      "github.com/spf13/viper": "refer to #102598",
 | 
			
		||||
      "github.com/xeipuuv/gojsonschema": "unmaintained",
 | 
			
		||||
@@ -126,10 +124,6 @@
 | 
			
		||||
        "k8s.io/kubernetes",
 | 
			
		||||
        "k8s.io/metrics"
 | 
			
		||||
      ],
 | 
			
		||||
      "github.com/golang/mock": [
 | 
			
		||||
        "k8s.io/kubernetes",
 | 
			
		||||
        "k8s.io/legacy-cloud-providers"
 | 
			
		||||
      ],
 | 
			
		||||
      "github.com/google/s2a-go": [
 | 
			
		||||
        "cloud.google.com/go/compute",
 | 
			
		||||
        "google.golang.org/api"
 | 
			
		||||
@@ -187,9 +181,6 @@
 | 
			
		||||
        "sigs.k8s.io/kustomize/api",
 | 
			
		||||
        "sigs.k8s.io/kustomize/kustomize/v5"
 | 
			
		||||
      ],
 | 
			
		||||
      "github.com/rubiojr/go-vhd": [
 | 
			
		||||
        "k8s.io/legacy-cloud-providers"
 | 
			
		||||
      ],
 | 
			
		||||
      "go.opencensus.io": [
 | 
			
		||||
        "cloud.google.com/go/compute",
 | 
			
		||||
        "github.com/Microsoft/hcsshim",
 | 
			
		||||
@@ -238,7 +229,6 @@
 | 
			
		||||
      "cloud.google.com/go/compute",
 | 
			
		||||
      "github.com/GoogleCloudPlatform/k8s-cloud-provider",
 | 
			
		||||
      "github.com/gogo/protobuf",
 | 
			
		||||
      "github.com/golang/mock",
 | 
			
		||||
      "github.com/google/s2a-go",
 | 
			
		||||
      "github.com/google/shlex",
 | 
			
		||||
      "github.com/googleapis/enterprise-certificate-proxy",
 | 
			
		||||
@@ -249,7 +239,6 @@
 | 
			
		||||
      "github.com/json-iterator/go",
 | 
			
		||||
      "github.com/mailru/easyjson",
 | 
			
		||||
      "github.com/pkg/errors",
 | 
			
		||||
      "github.com/rubiojr/go-vhd",
 | 
			
		||||
      "go.opencensus.io",
 | 
			
		||||
      "golang.org/x/exp",
 | 
			
		||||
      "google.golang.org/api",
 | 
			
		||||
 
 | 
			
		||||
@@ -21,7 +21,6 @@ package cloudprovider
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	// Cloud providers
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/azure"
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/gce"
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/vsphere"
 | 
			
		||||
)
 | 
			
		||||
 
 | 
			
		||||
@@ -1,13 +0,0 @@
 | 
			
		||||
# See the OWNERS docs at https://go.k8s.io/owners
 | 
			
		||||
 | 
			
		||||
approvers:
 | 
			
		||||
  - andyzhangx
 | 
			
		||||
  - feiskyer
 | 
			
		||||
  - khenidak
 | 
			
		||||
reviewers:
 | 
			
		||||
  - andyzhangx
 | 
			
		||||
  - feiskyer
 | 
			
		||||
  - khenidak
 | 
			
		||||
emeritus_approvers:
 | 
			
		||||
  - karataliu
 | 
			
		||||
  - brendandburns
 | 
			
		||||
@@ -1,291 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 Microsoft Corporation
 | 
			
		||||
 | 
			
		||||
MIT License
 | 
			
		||||
 | 
			
		||||
Copyright (c) Microsoft Corporation. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Permission is hereby granted, free of charge, to any person obtaining a copy
 | 
			
		||||
of this software and associated documentation files (the "Software"), to deal
 | 
			
		||||
in the Software without restriction, including without limitation the rights
 | 
			
		||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 | 
			
		||||
copies of the Software, and to permit persons to whom the Software is
 | 
			
		||||
furnished to do so, subject to the following conditions:
 | 
			
		||||
 | 
			
		||||
The above copyright notice and this permission notice shall be included in all
 | 
			
		||||
copies or substantial portions of the Software.
 | 
			
		||||
 | 
			
		||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 | 
			
		||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 | 
			
		||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 | 
			
		||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 | 
			
		||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 | 
			
		||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 | 
			
		||||
SOFTWARE
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Source: https://github.com/Azure/acr-docker-credential-helper/blob/a79b541f3ee761f6cc4511863ed41fb038c19464/src/docker-credential-acr/acr_login.go
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"net/url"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"time"
 | 
			
		||||
	"unicode"
 | 
			
		||||
 | 
			
		||||
	utilnet "k8s.io/apimachinery/pkg/util/net"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
type authDirective struct {
 | 
			
		||||
	service string
 | 
			
		||||
	realm   string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type acrAuthResponse struct {
 | 
			
		||||
	RefreshToken string `json:"refresh_token"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// 5 minutes buffer time to allow timeshift between local machine and AAD
 | 
			
		||||
const userAgentHeader = "User-Agent"
 | 
			
		||||
const userAgent = "kubernetes-credentialprovider-acr"
 | 
			
		||||
 | 
			
		||||
const dockerTokenLoginUsernameGUID = "00000000-0000-0000-0000-000000000000"
 | 
			
		||||
 | 
			
		||||
var client = &http.Client{
 | 
			
		||||
	Transport: utilnet.SetTransportDefaults(&http.Transport{}),
 | 
			
		||||
	Timeout:   time.Second * 60,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func receiveChallengeFromLoginServer(serverAddress string) (*authDirective, error) {
 | 
			
		||||
	challengeURL := url.URL{
 | 
			
		||||
		Scheme: "https",
 | 
			
		||||
		Host:   serverAddress,
 | 
			
		||||
		Path:   "v2/",
 | 
			
		||||
	}
 | 
			
		||||
	var err error
 | 
			
		||||
	var r *http.Request
 | 
			
		||||
	r, err = http.NewRequest("GET", challengeURL.String(), nil)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("failed to construct request, got %v", err)
 | 
			
		||||
	}
 | 
			
		||||
	r.Header.Add(userAgentHeader, userAgent)
 | 
			
		||||
 | 
			
		||||
	var challenge *http.Response
 | 
			
		||||
	if challenge, err = client.Do(r); err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("error reaching registry endpoint %s, error: %s", challengeURL.String(), err)
 | 
			
		||||
	}
 | 
			
		||||
	defer challenge.Body.Close()
 | 
			
		||||
 | 
			
		||||
	if challenge.StatusCode != 401 {
 | 
			
		||||
		return nil, fmt.Errorf("registry did not issue a valid AAD challenge, status: %d", challenge.StatusCode)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var authHeader []string
 | 
			
		||||
	var ok bool
 | 
			
		||||
	if authHeader, ok = challenge.Header["Www-Authenticate"]; !ok {
 | 
			
		||||
		return nil, fmt.Errorf("challenge response does not contain header 'Www-Authenticate'")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(authHeader) != 1 {
 | 
			
		||||
		return nil, fmt.Errorf("registry did not issue a valid AAD challenge, authenticate header [%s]",
 | 
			
		||||
			strings.Join(authHeader, ", "))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	authSections := strings.SplitN(authHeader[0], " ", 2)
 | 
			
		||||
	authType := strings.ToLower(authSections[0])
 | 
			
		||||
	var authParams *map[string]string
 | 
			
		||||
	if authParams, err = parseAssignments(authSections[1]); err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("unable to understand the contents of Www-Authenticate header %s", authSections[1])
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// verify headers
 | 
			
		||||
	if !strings.EqualFold("Bearer", authType) {
 | 
			
		||||
		return nil, fmt.Errorf("Www-Authenticate: expected realm: Bearer, actual: %s", authType)
 | 
			
		||||
	}
 | 
			
		||||
	if len((*authParams)["service"]) == 0 {
 | 
			
		||||
		return nil, fmt.Errorf("Www-Authenticate: missing header \"service\"")
 | 
			
		||||
	}
 | 
			
		||||
	if len((*authParams)["realm"]) == 0 {
 | 
			
		||||
		return nil, fmt.Errorf("Www-Authenticate: missing header \"realm\"")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &authDirective{
 | 
			
		||||
		service: (*authParams)["service"],
 | 
			
		||||
		realm:   (*authParams)["realm"],
 | 
			
		||||
	}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func performTokenExchange(
 | 
			
		||||
	serverAddress string,
 | 
			
		||||
	directive *authDirective,
 | 
			
		||||
	tenant string,
 | 
			
		||||
	accessToken string) (string, error) {
 | 
			
		||||
	var err error
 | 
			
		||||
	data := url.Values{
 | 
			
		||||
		"service":       []string{directive.service},
 | 
			
		||||
		"grant_type":    []string{"access_token_refresh_token"},
 | 
			
		||||
		"access_token":  []string{accessToken},
 | 
			
		||||
		"refresh_token": []string{accessToken},
 | 
			
		||||
		"tenant":        []string{tenant},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var realmURL *url.URL
 | 
			
		||||
	if realmURL, err = url.Parse(directive.realm); err != nil {
 | 
			
		||||
		return "", fmt.Errorf("Www-Authenticate: invalid realm %s", directive.realm)
 | 
			
		||||
	}
 | 
			
		||||
	authEndpoint := fmt.Sprintf("%s://%s/oauth2/exchange", realmURL.Scheme, realmURL.Host)
 | 
			
		||||
 | 
			
		||||
	datac := data.Encode()
 | 
			
		||||
	var r *http.Request
 | 
			
		||||
	r, err = http.NewRequest("POST", authEndpoint, bytes.NewBufferString(datac))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", fmt.Errorf("failed to construct request, got %v", err)
 | 
			
		||||
	}
 | 
			
		||||
	r.Header.Add(userAgentHeader, userAgent)
 | 
			
		||||
	r.Header.Add("Content-Type", "application/x-www-form-urlencoded")
 | 
			
		||||
	r.Header.Add("Content-Length", strconv.Itoa(len(datac)))
 | 
			
		||||
 | 
			
		||||
	var exchange *http.Response
 | 
			
		||||
	if exchange, err = client.Do(r); err != nil {
 | 
			
		||||
		return "", fmt.Errorf("Www-Authenticate: failed to reach auth url %s", authEndpoint)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	defer exchange.Body.Close()
 | 
			
		||||
	if exchange.StatusCode != 200 {
 | 
			
		||||
		return "", fmt.Errorf("Www-Authenticate: auth url %s responded with status code %d", authEndpoint, exchange.StatusCode)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var content []byte
 | 
			
		||||
	limitedReader := &io.LimitedReader{R: exchange.Body, N: maxReadLength}
 | 
			
		||||
	if content, err = io.ReadAll(limitedReader); err != nil {
 | 
			
		||||
		return "", fmt.Errorf("Www-Authenticate: error reading response from %s", authEndpoint)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if limitedReader.N <= 0 {
 | 
			
		||||
		return "", errors.New("the read limit is reached")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var authResp acrAuthResponse
 | 
			
		||||
	if err = json.Unmarshal(content, &authResp); err != nil {
 | 
			
		||||
		return "", fmt.Errorf("Www-Authenticate: unable to read response %s", content)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return authResp.RefreshToken, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Try and parse a string of assignments in the form of:
 | 
			
		||||
// key1 = value1, key2 = "value 2", key3 = ""
 | 
			
		||||
// Note: this method and handle quotes but does not handle escaping of quotes
 | 
			
		||||
func parseAssignments(statements string) (*map[string]string, error) {
 | 
			
		||||
	var cursor int
 | 
			
		||||
	result := make(map[string]string)
 | 
			
		||||
	var errorMsg = fmt.Errorf("malformed header value: %s", statements)
 | 
			
		||||
	for {
 | 
			
		||||
		// parse key
 | 
			
		||||
		equalIndex := nextOccurrence(statements, cursor, "=")
 | 
			
		||||
		if equalIndex == -1 {
 | 
			
		||||
			return nil, errorMsg
 | 
			
		||||
		}
 | 
			
		||||
		key := strings.TrimSpace(statements[cursor:equalIndex])
 | 
			
		||||
 | 
			
		||||
		// parse value
 | 
			
		||||
		cursor = nextNoneSpace(statements, equalIndex+1)
 | 
			
		||||
		if cursor == -1 {
 | 
			
		||||
			return nil, errorMsg
 | 
			
		||||
		}
 | 
			
		||||
		// case: value is quoted
 | 
			
		||||
		if statements[cursor] == '"' {
 | 
			
		||||
			cursor = cursor + 1
 | 
			
		||||
			// like I said, not handling escapes, but this will skip any comma that's
 | 
			
		||||
			// within the quotes which is somewhat more likely
 | 
			
		||||
			closeQuoteIndex := nextOccurrence(statements, cursor, "\"")
 | 
			
		||||
			if closeQuoteIndex == -1 {
 | 
			
		||||
				return nil, errorMsg
 | 
			
		||||
			}
 | 
			
		||||
			value := statements[cursor:closeQuoteIndex]
 | 
			
		||||
			result[key] = value
 | 
			
		||||
 | 
			
		||||
			commaIndex := nextNoneSpace(statements, closeQuoteIndex+1)
 | 
			
		||||
			if commaIndex == -1 {
 | 
			
		||||
				// no more comma, done
 | 
			
		||||
				return &result, nil
 | 
			
		||||
			} else if statements[commaIndex] != ',' {
 | 
			
		||||
				// expect comma immediately after close quote
 | 
			
		||||
				return nil, errorMsg
 | 
			
		||||
			} else {
 | 
			
		||||
				cursor = commaIndex + 1
 | 
			
		||||
			}
 | 
			
		||||
		} else {
 | 
			
		||||
			commaIndex := nextOccurrence(statements, cursor, ",")
 | 
			
		||||
			endStatements := commaIndex == -1
 | 
			
		||||
			var untrimmed string
 | 
			
		||||
			if endStatements {
 | 
			
		||||
				untrimmed = statements[cursor:commaIndex]
 | 
			
		||||
			} else {
 | 
			
		||||
				untrimmed = statements[cursor:]
 | 
			
		||||
			}
 | 
			
		||||
			value := strings.TrimSpace(untrimmed)
 | 
			
		||||
 | 
			
		||||
			if len(value) == 0 {
 | 
			
		||||
				// disallow empty value without quote
 | 
			
		||||
				return nil, errorMsg
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			result[key] = value
 | 
			
		||||
 | 
			
		||||
			if endStatements {
 | 
			
		||||
				return &result, nil
 | 
			
		||||
			}
 | 
			
		||||
			cursor = commaIndex + 1
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func nextOccurrence(str string, start int, sep string) int {
 | 
			
		||||
	if start >= len(str) {
 | 
			
		||||
		return -1
 | 
			
		||||
	}
 | 
			
		||||
	offset := strings.Index(str[start:], sep)
 | 
			
		||||
	if offset == -1 {
 | 
			
		||||
		return -1
 | 
			
		||||
	}
 | 
			
		||||
	return offset + start
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func nextNoneSpace(str string, start int) int {
 | 
			
		||||
	if start >= len(str) {
 | 
			
		||||
		return -1
 | 
			
		||||
	}
 | 
			
		||||
	offset := strings.IndexFunc(str[start:], func(c rune) bool { return !unicode.IsSpace(c) })
 | 
			
		||||
	if offset == -1 {
 | 
			
		||||
		return -1
 | 
			
		||||
	}
 | 
			
		||||
	return offset + start
 | 
			
		||||
}
 | 
			
		||||
@@ -1,350 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"errors"
 | 
			
		||||
	"io"
 | 
			
		||||
	"os"
 | 
			
		||||
	"regexp"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/adal"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	"github.com/spf13/pflag"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/tools/cache"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/kubernetes/pkg/credentialprovider"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/auth"
 | 
			
		||||
	"sigs.k8s.io/yaml"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var flagConfigFile = pflag.String("azure-container-registry-config", "",
 | 
			
		||||
	"Path to the file containing Azure container registry configuration information.")
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	dummyRegistryEmail = "name@contoso.com"
 | 
			
		||||
	maxReadLength      = 10 * 1 << 20 // 10MB
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	containerRegistryUrls = []string{"*.azurecr.io", "*.azurecr.cn", "*.azurecr.de", "*.azurecr.us"}
 | 
			
		||||
	acrRE                 = regexp.MustCompile(`.*\.azurecr\.io|.*\.azurecr\.cn|.*\.azurecr\.de|.*\.azurecr\.us`)
 | 
			
		||||
	warnOnce              sync.Once
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// init registers the various means by which credentials may
 | 
			
		||||
// be resolved on Azure.
 | 
			
		||||
func init() {
 | 
			
		||||
	credentialprovider.RegisterCredentialProvider(
 | 
			
		||||
		"azure",
 | 
			
		||||
		NewACRProvider(flagConfigFile),
 | 
			
		||||
	)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type cacheEntry struct {
 | 
			
		||||
	expiresAt   time.Time
 | 
			
		||||
	credentials credentialprovider.DockerConfigEntry
 | 
			
		||||
	registry    string
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// acrExpirationPolicy implements ExpirationPolicy from client-go.
 | 
			
		||||
type acrExpirationPolicy struct{}
 | 
			
		||||
 | 
			
		||||
// stringKeyFunc returns the cache key as a string
 | 
			
		||||
func stringKeyFunc(obj interface{}) (string, error) {
 | 
			
		||||
	key := obj.(*cacheEntry).registry
 | 
			
		||||
	return key, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// IsExpired checks if the ACR credentials are expired.
 | 
			
		||||
func (p *acrExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool {
 | 
			
		||||
	return time.Now().After(entry.Obj.(*cacheEntry).expiresAt)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// RegistriesClient is a testable interface for the ACR client List operation.
 | 
			
		||||
type RegistriesClient interface {
 | 
			
		||||
	List(ctx context.Context) ([]containerregistry.Registry, error)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewACRProvider parses the specified configFile and returns a DockerConfigProvider
 | 
			
		||||
func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider {
 | 
			
		||||
	return &acrProvider{
 | 
			
		||||
		file:  configFile,
 | 
			
		||||
		cache: cache.NewExpirationStore(stringKeyFunc, &acrExpirationPolicy{}),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
type acrProvider struct {
 | 
			
		||||
	file                  *string
 | 
			
		||||
	config                *auth.AzureAuthConfig
 | 
			
		||||
	environment           *azure.Environment
 | 
			
		||||
	servicePrincipalToken *adal.ServicePrincipalToken
 | 
			
		||||
	cache                 cache.Store
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ParseConfig returns a parsed configuration for an Azure cloudprovider config file
 | 
			
		||||
func parseConfig(configReader io.Reader) (*auth.AzureAuthConfig, error) {
 | 
			
		||||
	var config auth.AzureAuthConfig
 | 
			
		||||
 | 
			
		||||
	if configReader == nil {
 | 
			
		||||
		return &config, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	limitedReader := &io.LimitedReader{R: configReader, N: maxReadLength}
 | 
			
		||||
	configContents, err := io.ReadAll(limitedReader)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	if limitedReader.N <= 0 {
 | 
			
		||||
		return nil, errors.New("the read limit is reached")
 | 
			
		||||
	}
 | 
			
		||||
	err = yaml.Unmarshal(configContents, &config)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &config, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *acrProvider) loadConfig(rdr io.Reader) error {
 | 
			
		||||
	var err error
 | 
			
		||||
	a.config, err = parseConfig(rdr)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("Failed to load azure credential file: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	a.environment, err = auth.ParseAzureEnvironment(a.config.Cloud, a.config.ResourceManagerEndpoint, a.config.IdentitySystem)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *acrProvider) Enabled() bool {
 | 
			
		||||
	if a.file == nil || len(*a.file) == 0 {
 | 
			
		||||
		klog.V(5).Infof("Azure config unspecified, disabling")
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if credentialprovider.AreLegacyCloudCredentialProvidersDisabled() {
 | 
			
		||||
		warnOnce.Do(func() {
 | 
			
		||||
			klog.V(4).Infof("Azure credential provider is now disabled. Please refer to sig-cloud-provider for guidance on external credential provider integration for Azure")
 | 
			
		||||
		})
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	f, err := os.Open(*a.file)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("Failed to load config from file: %s", *a.file)
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
	defer f.Close()
 | 
			
		||||
 | 
			
		||||
	err = a.loadConfig(f)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("Failed to load config from file: %s", *a.file)
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	a.servicePrincipalToken, err = auth.GetServicePrincipalToken(a.config, a.environment)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("Failed to create service principal token: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
	return true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getFromCache attempts to get credentials from the cache
 | 
			
		||||
func (a *acrProvider) getFromCache(loginServer string) (credentialprovider.DockerConfig, bool) {
 | 
			
		||||
	cfg := credentialprovider.DockerConfig{}
 | 
			
		||||
	obj, exists, err := a.cache.GetByKey(loginServer)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("error getting ACR credentials from cache: %v", err)
 | 
			
		||||
		return cfg, false
 | 
			
		||||
	}
 | 
			
		||||
	if !exists {
 | 
			
		||||
		return cfg, false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	entry := obj.(*cacheEntry)
 | 
			
		||||
	cfg[entry.registry] = entry.credentials
 | 
			
		||||
	return cfg, true
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getFromACR gets credentials from ACR since they are not in the cache
 | 
			
		||||
func (a *acrProvider) getFromACR(loginServer string) (credentialprovider.DockerConfig, error) {
 | 
			
		||||
	cfg := credentialprovider.DockerConfig{}
 | 
			
		||||
	cred, err := getACRDockerEntryFromARMToken(a, loginServer)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return cfg, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	entry := &cacheEntry{
 | 
			
		||||
		expiresAt:   time.Now().Add(10 * time.Minute),
 | 
			
		||||
		credentials: *cred,
 | 
			
		||||
		registry:    loginServer,
 | 
			
		||||
	}
 | 
			
		||||
	if err := a.cache.Add(entry); err != nil {
 | 
			
		||||
		return cfg, err
 | 
			
		||||
	}
 | 
			
		||||
	cfg[loginServer] = *cred
 | 
			
		||||
	return cfg, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (a *acrProvider) Provide(image string) credentialprovider.DockerConfig {
 | 
			
		||||
	loginServer := a.parseACRLoginServerFromImage(image)
 | 
			
		||||
	if loginServer == "" {
 | 
			
		||||
		klog.V(2).Infof("image(%s) is not from ACR, return empty authentication", image)
 | 
			
		||||
		return credentialprovider.DockerConfig{}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cfg := credentialprovider.DockerConfig{}
 | 
			
		||||
	if a.config != nil && a.config.UseManagedIdentityExtension {
 | 
			
		||||
		var exists bool
 | 
			
		||||
		cfg, exists = a.getFromCache(loginServer)
 | 
			
		||||
		if exists {
 | 
			
		||||
			klog.V(4).Infof("Got ACR credentials from cache for %s", loginServer)
 | 
			
		||||
		} else {
 | 
			
		||||
			klog.V(2).Infof("unable to get ACR credentials from cache for %s, checking ACR API", loginServer)
 | 
			
		||||
			var err error
 | 
			
		||||
			cfg, err = a.getFromACR(loginServer)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				klog.Errorf("error getting credentials from ACR for %s %v", loginServer, err)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	} else {
 | 
			
		||||
		// Add our entry for each of the supported container registry URLs
 | 
			
		||||
		for _, url := range containerRegistryUrls {
 | 
			
		||||
			cred := &credentialprovider.DockerConfigEntry{
 | 
			
		||||
				Username: a.config.AADClientID,
 | 
			
		||||
				Password: a.config.AADClientSecret,
 | 
			
		||||
				Email:    dummyRegistryEmail,
 | 
			
		||||
			}
 | 
			
		||||
			cfg[url] = *cred
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Handle the custom cloud case
 | 
			
		||||
		// In clouds where ACR is not yet deployed, the string will be empty
 | 
			
		||||
		if a.environment != nil && strings.Contains(a.environment.ContainerRegistryDNSSuffix, ".azurecr.") {
 | 
			
		||||
			customAcrSuffix := "*" + a.environment.ContainerRegistryDNSSuffix
 | 
			
		||||
			hasBeenAdded := false
 | 
			
		||||
			for _, url := range containerRegistryUrls {
 | 
			
		||||
				if strings.EqualFold(url, customAcrSuffix) {
 | 
			
		||||
					hasBeenAdded = true
 | 
			
		||||
					break
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if !hasBeenAdded {
 | 
			
		||||
				cred := &credentialprovider.DockerConfigEntry{
 | 
			
		||||
					Username: a.config.AADClientID,
 | 
			
		||||
					Password: a.config.AADClientSecret,
 | 
			
		||||
					Email:    dummyRegistryEmail,
 | 
			
		||||
				}
 | 
			
		||||
				cfg[customAcrSuffix] = *cred
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// add ACR anonymous repo support: use empty username and password for anonymous access
 | 
			
		||||
	defaultConfigEntry := credentialprovider.DockerConfigEntry{
 | 
			
		||||
		Username: "",
 | 
			
		||||
		Password: "",
 | 
			
		||||
		Email:    dummyRegistryEmail,
 | 
			
		||||
	}
 | 
			
		||||
	cfg["*.azurecr.*"] = defaultConfigEntry
 | 
			
		||||
	return cfg
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getLoginServer(registry containerregistry.Registry) string {
 | 
			
		||||
	return *(*registry.RegistryProperties).LoginServer
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getACRDockerEntryFromARMToken(a *acrProvider, loginServer string) (*credentialprovider.DockerConfigEntry, error) {
 | 
			
		||||
	if a.servicePrincipalToken == nil {
 | 
			
		||||
		token, err := auth.GetServicePrincipalToken(a.config, a.environment)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.Errorf("Failed to create service principal token: %v", err)
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
		a.servicePrincipalToken = token
 | 
			
		||||
	} else {
 | 
			
		||||
		// Run EnsureFresh to make sure the token is valid and does not expire
 | 
			
		||||
		if err := a.servicePrincipalToken.EnsureFresh(); err != nil {
 | 
			
		||||
			klog.Errorf("Failed to ensure fresh service principal token: %v", err)
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armAccessToken := a.servicePrincipalToken.OAuthToken()
 | 
			
		||||
 | 
			
		||||
	klog.V(4).Infof("discovering auth redirects for: %s", loginServer)
 | 
			
		||||
	directive, err := receiveChallengeFromLoginServer(loginServer)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("failed to receive challenge: %s", err)
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(4).Infof("exchanging an acr refresh_token")
 | 
			
		||||
	registryRefreshToken, err := performTokenExchange(
 | 
			
		||||
		loginServer, directive, a.config.TenantID, armAccessToken)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("failed to perform token exchange: %s", err)
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(4).Infof("adding ACR docker config entry for: %s", loginServer)
 | 
			
		||||
	return &credentialprovider.DockerConfigEntry{
 | 
			
		||||
		Username: dockerTokenLoginUsernameGUID,
 | 
			
		||||
		Password: registryRefreshToken,
 | 
			
		||||
		Email:    dummyRegistryEmail,
 | 
			
		||||
	}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// parseACRLoginServerFromImage takes image as parameter and returns login server of it.
 | 
			
		||||
// Parameter `image` is expected in following format: foo.azurecr.io/bar/imageName:version
 | 
			
		||||
// If the provided image is not an acr image, this function will return an empty string.
 | 
			
		||||
func (a *acrProvider) parseACRLoginServerFromImage(image string) string {
 | 
			
		||||
	match := acrRE.FindAllString(image, -1)
 | 
			
		||||
	if len(match) == 1 {
 | 
			
		||||
		return match[0]
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// handle the custom cloud case
 | 
			
		||||
	if a != nil && a.environment != nil {
 | 
			
		||||
		cloudAcrSuffix := a.environment.ContainerRegistryDNSSuffix
 | 
			
		||||
		cloudAcrSuffixLength := len(cloudAcrSuffix)
 | 
			
		||||
		if cloudAcrSuffixLength > 0 {
 | 
			
		||||
			customAcrSuffixIndex := strings.Index(image, cloudAcrSuffix)
 | 
			
		||||
			if customAcrSuffixIndex != -1 {
 | 
			
		||||
				endIndex := customAcrSuffixIndex + cloudAcrSuffixLength
 | 
			
		||||
				return image[0:endIndex]
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return ""
 | 
			
		||||
}
 | 
			
		||||
@@ -1,182 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2019-05-01/containerregistry"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	"k8s.io/client-go/tools/cache"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func Test(t *testing.T) {
 | 
			
		||||
	configStr := `
 | 
			
		||||
    {
 | 
			
		||||
        "aadClientId": "foo",
 | 
			
		||||
        "aadClientSecret": "bar"
 | 
			
		||||
    }`
 | 
			
		||||
	result := []containerregistry.Registry{
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.String("foo"),
 | 
			
		||||
			RegistryProperties: &containerregistry.RegistryProperties{
 | 
			
		||||
				LoginServer: pointer.String("*.azurecr.io"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.String("bar"),
 | 
			
		||||
			RegistryProperties: &containerregistry.RegistryProperties{
 | 
			
		||||
				LoginServer: pointer.String("*.azurecr.cn"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.String("baz"),
 | 
			
		||||
			RegistryProperties: &containerregistry.RegistryProperties{
 | 
			
		||||
				LoginServer: pointer.String("*.azurecr.de"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.String("bus"),
 | 
			
		||||
			RegistryProperties: &containerregistry.RegistryProperties{
 | 
			
		||||
				LoginServer: pointer.String("*.azurecr.us"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	provider := &acrProvider{
 | 
			
		||||
		cache: cache.NewExpirationStore(stringKeyFunc, &acrExpirationPolicy{}),
 | 
			
		||||
	}
 | 
			
		||||
	provider.loadConfig(bytes.NewBufferString(configStr))
 | 
			
		||||
 | 
			
		||||
	creds := provider.Provide("foo.azurecr.io/nginx:v1")
 | 
			
		||||
 | 
			
		||||
	if len(creds) != len(result)+1 {
 | 
			
		||||
		t.Errorf("Unexpected list: %v, expected length %d", creds, len(result)+1)
 | 
			
		||||
	}
 | 
			
		||||
	for _, cred := range creds {
 | 
			
		||||
		if cred.Username != "" && cred.Username != "foo" {
 | 
			
		||||
			t.Errorf("expected 'foo' for username, saw: %v", cred.Username)
 | 
			
		||||
		}
 | 
			
		||||
		if cred.Password != "" && cred.Password != "bar" {
 | 
			
		||||
			t.Errorf("expected 'bar' for password, saw: %v", cred.Username)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	for _, val := range result {
 | 
			
		||||
		registryName := getLoginServer(val)
 | 
			
		||||
		if _, found := creds[registryName]; !found {
 | 
			
		||||
			t.Errorf("Missing expected registry: %s", registryName)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestProvide(t *testing.T) {
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc                string
 | 
			
		||||
		image               string
 | 
			
		||||
		configStr           string
 | 
			
		||||
		expectedCredsLength int
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:  "return multiple credentials using Service Principal",
 | 
			
		||||
			image: "foo.azurecr.io/bar/image:v1",
 | 
			
		||||
			configStr: `
 | 
			
		||||
    {
 | 
			
		||||
        "aadClientId": "foo",
 | 
			
		||||
        "aadClientSecret": "bar"
 | 
			
		||||
    }`,
 | 
			
		||||
			expectedCredsLength: 5,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:  "retuen 0 credential for non-ACR image using Managed Identity",
 | 
			
		||||
			image: "busybox",
 | 
			
		||||
			configStr: `
 | 
			
		||||
    {
 | 
			
		||||
	"UseManagedIdentityExtension": true
 | 
			
		||||
    }`,
 | 
			
		||||
			expectedCredsLength: 0,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		provider := &acrProvider{
 | 
			
		||||
			cache: cache.NewExpirationStore(stringKeyFunc, &acrExpirationPolicy{}),
 | 
			
		||||
		}
 | 
			
		||||
		provider.loadConfig(bytes.NewBufferString(test.configStr))
 | 
			
		||||
 | 
			
		||||
		creds := provider.Provide(test.image)
 | 
			
		||||
		assert.Equal(t, test.expectedCredsLength, len(creds), "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestParseACRLoginServerFromImage(t *testing.T) {
 | 
			
		||||
	configStr := `
 | 
			
		||||
    {
 | 
			
		||||
        "aadClientId": "foo",
 | 
			
		||||
        "aadClientSecret": "bar"
 | 
			
		||||
    }`
 | 
			
		||||
 | 
			
		||||
	provider := &acrProvider{}
 | 
			
		||||
	provider.loadConfig(bytes.NewBufferString(configStr))
 | 
			
		||||
	provider.environment = &azure.Environment{
 | 
			
		||||
		ContainerRegistryDNSSuffix: ".azurecr.my.cloud",
 | 
			
		||||
	}
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		image    string
 | 
			
		||||
		expected string
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			image:    "invalidImage",
 | 
			
		||||
			expected: "",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			image:    "docker.io/library/busybox:latest",
 | 
			
		||||
			expected: "",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			image:    "foo.azurecr.io/bar/image:version",
 | 
			
		||||
			expected: "foo.azurecr.io",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			image:    "foo.azurecr.cn/bar/image:version",
 | 
			
		||||
			expected: "foo.azurecr.cn",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			image:    "foo.azurecr.de/bar/image:version",
 | 
			
		||||
			expected: "foo.azurecr.de",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			image:    "foo.azurecr.us/bar/image:version",
 | 
			
		||||
			expected: "foo.azurecr.us",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			image:    "foo.azurecr.my.cloud/bar/image:version",
 | 
			
		||||
			expected: "foo.azurecr.my.cloud",
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		if loginServer := provider.parseACRLoginServerFromImage(test.image); loginServer != test.expected {
 | 
			
		||||
			t.Errorf("function parseACRLoginServerFromImage returns \"%s\" for image %s, expected \"%s\"", loginServer, test.image, test.expected)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -1,17 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
@@ -40,7 +40,6 @@ var (
 | 
			
		||||
		external bool
 | 
			
		||||
		detail   string
 | 
			
		||||
	}{
 | 
			
		||||
		{"azure", false, "The Azure provider is deprecated and will be removed in a future release. Please use https://github.com/kubernetes-sigs/cloud-provider-azure"},
 | 
			
		||||
		{"gce", false, "The GCE provider is deprecated and will be removed in a future release. Please use https://github.com/kubernetes/cloud-provider-gcp"},
 | 
			
		||||
		{"vsphere", false, "The vSphere provider is deprecated and will be removed in a future release. Please use https://github.com/kubernetes/cloud-provider-vsphere"},
 | 
			
		||||
	}
 | 
			
		||||
 
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
# See the OWNERS docs at https://go.k8s.io/owners
 | 
			
		||||
# We are no longer accepting features into k8s.io/legacy-cloud-providers.
 | 
			
		||||
# Any kind/feature PRs must be approved by SIG Cloud Provider going forward.
 | 
			
		||||
 | 
			
		||||
emeritus_approvers:
 | 
			
		||||
  - andyzhangx
 | 
			
		||||
  - brendandburns
 | 
			
		||||
  - feiskyer
 | 
			
		||||
  - karataliu
 | 
			
		||||
  - khenidak
 | 
			
		||||
  - nilo19
 | 
			
		||||
reviewers:
 | 
			
		||||
  - andyzhangx
 | 
			
		||||
  - aramase
 | 
			
		||||
  - feiskyer
 | 
			
		||||
  - khenidak
 | 
			
		||||
  - ritazh
 | 
			
		||||
  - nilo19
 | 
			
		||||
@@ -1,290 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package auth
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"crypto/rsa"
 | 
			
		||||
	"crypto/x509"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/adal"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	"golang.org/x/crypto/pkcs12"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	// ADFSIdentitySystem is the override value for tenantID on Azure Stack clouds.
 | 
			
		||||
	ADFSIdentitySystem = "adfs"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	// ErrorNoAuth indicates that no credentials are provided.
 | 
			
		||||
	ErrorNoAuth = fmt.Errorf("no credentials provided for Azure cloud provider")
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// AzureAuthConfig holds auth related part of cloud config
 | 
			
		||||
type AzureAuthConfig struct {
 | 
			
		||||
	// The cloud environment identifier. Takes values from https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
 | 
			
		||||
	Cloud string `json:"cloud,omitempty" yaml:"cloud,omitempty"`
 | 
			
		||||
	// The AAD Tenant ID for the Subscription that the cluster is deployed in
 | 
			
		||||
	TenantID string `json:"tenantId,omitempty" yaml:"tenantId,omitempty"`
 | 
			
		||||
	// The ClientID for an AAD application with RBAC access to talk to Azure RM APIs
 | 
			
		||||
	AADClientID string `json:"aadClientId,omitempty" yaml:"aadClientId,omitempty"`
 | 
			
		||||
	// The ClientSecret for an AAD application with RBAC access to talk to Azure RM APIs
 | 
			
		||||
	AADClientSecret string `json:"aadClientSecret,omitempty" yaml:"aadClientSecret,omitempty" datapolicy:"token"`
 | 
			
		||||
	// The path of a client certificate for an AAD application with RBAC access to talk to Azure RM APIs
 | 
			
		||||
	AADClientCertPath string `json:"aadClientCertPath,omitempty" yaml:"aadClientCertPath,omitempty"`
 | 
			
		||||
	// The password of the client certificate for an AAD application with RBAC access to talk to Azure RM APIs
 | 
			
		||||
	AADClientCertPassword string `json:"aadClientCertPassword,omitempty" yaml:"aadClientCertPassword,omitempty" datapolicy:"password"`
 | 
			
		||||
	// Use managed service identity for the virtual machine to access Azure ARM APIs
 | 
			
		||||
	UseManagedIdentityExtension bool `json:"useManagedIdentityExtension,omitempty" yaml:"useManagedIdentityExtension,omitempty"`
 | 
			
		||||
	// UserAssignedIdentityID contains the Client ID of the user assigned MSI which is assigned to the underlying VMs. If empty the user assigned identity is not used.
 | 
			
		||||
	// More details of the user assigned identity can be found at: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/overview
 | 
			
		||||
	// For the user assigned identity specified here to be used, the UseManagedIdentityExtension has to be set to true.
 | 
			
		||||
	UserAssignedIdentityID string `json:"userAssignedIdentityID,omitempty" yaml:"userAssignedIdentityID,omitempty"`
 | 
			
		||||
	// The ID of the Azure Subscription that the cluster is deployed in
 | 
			
		||||
	SubscriptionID string `json:"subscriptionId,omitempty" yaml:"subscriptionId,omitempty"`
 | 
			
		||||
	// IdentitySystem indicates the identity provider. Relevant only to hybrid clouds (Azure Stack).
 | 
			
		||||
	// Allowed values are 'azure_ad' (default), 'adfs'.
 | 
			
		||||
	IdentitySystem string `json:"identitySystem,omitempty" yaml:"identitySystem,omitempty"`
 | 
			
		||||
	// ResourceManagerEndpoint is the cloud's resource manager endpoint. If set, cloud provider queries this endpoint
 | 
			
		||||
	// in order to generate an autorest.Environment instance instead of using one of the pre-defined Environments.
 | 
			
		||||
	ResourceManagerEndpoint string `json:"resourceManagerEndpoint,omitempty" yaml:"resourceManagerEndpoint,omitempty"`
 | 
			
		||||
	// The AAD Tenant ID for the Subscription that the network resources are deployed in
 | 
			
		||||
	NetworkResourceTenantID string `json:"networkResourceTenantID,omitempty" yaml:"networkResourceTenantID,omitempty"`
 | 
			
		||||
	// The ID of the Azure Subscription that the network resources are deployed in
 | 
			
		||||
	NetworkResourceSubscriptionID string `json:"networkResourceSubscriptionID,omitempty" yaml:"networkResourceSubscriptionID,omitempty"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetServicePrincipalToken creates a new service principal token based on the configuration.
 | 
			
		||||
//
 | 
			
		||||
// By default, the cluster and its network resources are deployed in the same AAD Tenant and Subscription,
 | 
			
		||||
// and all azure clients use this method to fetch Service Principal Token.
 | 
			
		||||
//
 | 
			
		||||
// If NetworkResourceTenantID and NetworkResourceSubscriptionID are specified to have different values than TenantID and SubscriptionID, network resources are deployed in different AAD Tenant and Subscription than those for the cluster,
 | 
			
		||||
// than only azure clients except VM/VMSS and network resource ones use this method to fetch Token.
 | 
			
		||||
// For tokens for VM/VMSS and network resource ones, please check GetMultiTenantServicePrincipalToken and GetNetworkResourceServicePrincipalToken.
 | 
			
		||||
func GetServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
 | 
			
		||||
	var tenantID string
 | 
			
		||||
	if strings.EqualFold(config.IdentitySystem, ADFSIdentitySystem) {
 | 
			
		||||
		tenantID = ADFSIdentitySystem
 | 
			
		||||
	} else {
 | 
			
		||||
		tenantID = config.TenantID
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if config.UseManagedIdentityExtension {
 | 
			
		||||
		klog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
 | 
			
		||||
		msiEndpoint, err := adal.GetMSIVMEndpoint()
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, fmt.Errorf("Getting the managed service identity endpoint: %v", err)
 | 
			
		||||
		}
 | 
			
		||||
		if len(config.UserAssignedIdentityID) > 0 {
 | 
			
		||||
			klog.V(4).Info("azure: using User Assigned MSI ID to retrieve access token")
 | 
			
		||||
			return adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint,
 | 
			
		||||
				env.ServiceManagementEndpoint,
 | 
			
		||||
				config.UserAssignedIdentityID)
 | 
			
		||||
		}
 | 
			
		||||
		klog.V(4).Info("azure: using System Assigned MSI to retrieve access token")
 | 
			
		||||
		return adal.NewServicePrincipalTokenFromMSI(
 | 
			
		||||
			msiEndpoint,
 | 
			
		||||
			env.ServiceManagementEndpoint)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, tenantID, nil)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("creating the OAuth config: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(config.AADClientSecret) > 0 {
 | 
			
		||||
		klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
 | 
			
		||||
		return adal.NewServicePrincipalToken(
 | 
			
		||||
			*oauthConfig,
 | 
			
		||||
			config.AADClientID,
 | 
			
		||||
			config.AADClientSecret,
 | 
			
		||||
			env.ServiceManagementEndpoint)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
 | 
			
		||||
		klog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
 | 
			
		||||
		certData, err := ioutil.ReadFile(config.AADClientCertPath)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
 | 
			
		||||
		}
 | 
			
		||||
		certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, fmt.Errorf("decoding the client certificate: %v", err)
 | 
			
		||||
		}
 | 
			
		||||
		return adal.NewServicePrincipalTokenFromCertificate(
 | 
			
		||||
			*oauthConfig,
 | 
			
		||||
			config.AADClientID,
 | 
			
		||||
			certificate,
 | 
			
		||||
			privateKey,
 | 
			
		||||
			env.ServiceManagementEndpoint)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil, ErrorNoAuth
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetMultiTenantServicePrincipalToken is used when (and only when) NetworkResourceTenantID and NetworkResourceSubscriptionID are specified to have different values than TenantID and SubscriptionID.
 | 
			
		||||
//
 | 
			
		||||
// In that scenario, network resources are deployed in different AAD Tenant and Subscription than those for the cluster,
 | 
			
		||||
// and this method creates a new multi-tenant service principal token based on the configuration.
 | 
			
		||||
//
 | 
			
		||||
// PrimaryToken of the returned multi-tenant token is for the AAD Tenant specified by TenantID, and AuxiliaryToken of the returned multi-tenant token is for the AAD Tenant specified by NetworkResourceTenantID.
 | 
			
		||||
//
 | 
			
		||||
// Azure VM/VMSS clients use this multi-tenant token, in order to operate those VM/VMSS in AAD Tenant specified by TenantID, and meanwhile in their payload they are referencing network resources (e.g. Load Balancer, Network Security Group, etc.) in AAD Tenant specified by NetworkResourceTenantID.
 | 
			
		||||
func GetMultiTenantServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.MultiTenantServicePrincipalToken, error) {
 | 
			
		||||
	err := config.checkConfigWhenNetworkResourceInDifferentTenant()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("got error(%v) in getting multi-tenant service principal token", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	multiTenantOAuthConfig, err := adal.NewMultiTenantOAuthConfig(
 | 
			
		||||
		env.ActiveDirectoryEndpoint, config.TenantID, []string{config.NetworkResourceTenantID}, adal.OAuthOptions{})
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("creating the multi-tenant OAuth config: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(config.AADClientSecret) > 0 {
 | 
			
		||||
		klog.V(2).Infoln("azure: using client_id+client_secret to retrieve multi-tenant access token")
 | 
			
		||||
		return adal.NewMultiTenantServicePrincipalToken(
 | 
			
		||||
			multiTenantOAuthConfig,
 | 
			
		||||
			config.AADClientID,
 | 
			
		||||
			config.AADClientSecret,
 | 
			
		||||
			env.ServiceManagementEndpoint)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
 | 
			
		||||
		return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting multi-tenant service principal token")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil, ErrorNoAuth
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetNetworkResourceServicePrincipalToken is used when (and only when) NetworkResourceTenantID and NetworkResourceSubscriptionID are specified to have different values than TenantID and SubscriptionID.
 | 
			
		||||
//
 | 
			
		||||
// In that scenario, network resources are deployed in different AAD Tenant and Subscription than those for the cluster,
 | 
			
		||||
// and this method creates a new service principal token for network resources tenant based on the configuration.
 | 
			
		||||
//
 | 
			
		||||
// Azure network resource (Load Balancer, Public IP, Route Table, Network Security Group and their sub level resources) clients use this multi-tenant token, in order to operate resources in AAD Tenant specified by NetworkResourceTenantID.
 | 
			
		||||
func GetNetworkResourceServicePrincipalToken(config *AzureAuthConfig, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
 | 
			
		||||
	err := config.checkConfigWhenNetworkResourceInDifferentTenant()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("got error(%v) in getting network resources service principal token", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, config.NetworkResourceTenantID, nil)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("creating the OAuth config for network resources tenant: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(config.AADClientSecret) > 0 {
 | 
			
		||||
		klog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token for network resources tenant")
 | 
			
		||||
		return adal.NewServicePrincipalToken(
 | 
			
		||||
			*oauthConfig,
 | 
			
		||||
			config.AADClientID,
 | 
			
		||||
			config.AADClientSecret,
 | 
			
		||||
			env.ServiceManagementEndpoint)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
 | 
			
		||||
		return nil, fmt.Errorf("AAD Application client certificate authentication is not supported in getting network resources service principal token")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil, ErrorNoAuth
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ParseAzureEnvironment returns the azure environment.
 | 
			
		||||
// If 'resourceManagerEndpoint' is set, the environment is computed by querying the cloud's resource manager endpoint.
 | 
			
		||||
// Otherwise, a pre-defined Environment is looked up by name.
 | 
			
		||||
func ParseAzureEnvironment(cloudName, resourceManagerEndpoint, identitySystem string) (*azure.Environment, error) {
 | 
			
		||||
	var env azure.Environment
 | 
			
		||||
	var err error
 | 
			
		||||
	if resourceManagerEndpoint != "" {
 | 
			
		||||
		klog.V(4).Infof("Loading environment from resource manager endpoint: %s", resourceManagerEndpoint)
 | 
			
		||||
		nameOverride := azure.OverrideProperty{Key: azure.EnvironmentName, Value: cloudName}
 | 
			
		||||
		env, err = azure.EnvironmentFromURL(resourceManagerEndpoint, nameOverride)
 | 
			
		||||
		if err == nil {
 | 
			
		||||
			azureStackOverrides(&env, resourceManagerEndpoint, identitySystem)
 | 
			
		||||
		}
 | 
			
		||||
	} else if cloudName == "" {
 | 
			
		||||
		klog.V(4).Info("Using public cloud environment")
 | 
			
		||||
		env = azure.PublicCloud
 | 
			
		||||
	} else {
 | 
			
		||||
		klog.V(4).Infof("Using %s environment", cloudName)
 | 
			
		||||
		env, err = azure.EnvironmentFromName(cloudName)
 | 
			
		||||
	}
 | 
			
		||||
	return &env, err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// UsesNetworkResourceInDifferentTenant determines whether the AzureAuthConfig indicates to use network resources in different AAD Tenant and Subscription than those for the cluster
 | 
			
		||||
// Return true only when both NetworkResourceTenantID and NetworkResourceSubscriptionID are specified
 | 
			
		||||
// and they are not equals to TenantID and SubscriptionID
 | 
			
		||||
func (config *AzureAuthConfig) UsesNetworkResourceInDifferentTenant() bool {
 | 
			
		||||
	return len(config.NetworkResourceTenantID) > 0 &&
 | 
			
		||||
		len(config.NetworkResourceSubscriptionID) > 0 &&
 | 
			
		||||
		!strings.EqualFold(config.NetworkResourceTenantID, config.TenantID) &&
 | 
			
		||||
		!strings.EqualFold(config.NetworkResourceSubscriptionID, config.SubscriptionID)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// decodePkcs12 decodes a PKCS#12 client certificate by extracting the public certificate and
 | 
			
		||||
// the private RSA key
 | 
			
		||||
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
 | 
			
		||||
	privateKey, certificate, err := pkcs12.Decode(pkcs, password)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, nil, fmt.Errorf("decoding the PKCS#12 client certificate: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
	rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey)
 | 
			
		||||
	if !isRsaKey {
 | 
			
		||||
		return nil, nil, fmt.Errorf("PKCS#12 certificate must contain a RSA private key")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return certificate, rsaPrivateKey, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// azureStackOverrides ensures that the Environment matches what AKSe currently generates for Azure Stack
 | 
			
		||||
func azureStackOverrides(env *azure.Environment, resourceManagerEndpoint, identitySystem string) {
 | 
			
		||||
	env.ManagementPortalURL = strings.Replace(resourceManagerEndpoint, "https://management.", "https://portal.", -1)
 | 
			
		||||
	env.ServiceManagementEndpoint = env.TokenAudience
 | 
			
		||||
	env.ResourceManagerVMDNSSuffix = strings.Replace(resourceManagerEndpoint, "https://management.", "cloudapp.", -1)
 | 
			
		||||
	env.ResourceManagerVMDNSSuffix = strings.TrimSuffix(env.ResourceManagerVMDNSSuffix, "/")
 | 
			
		||||
	if strings.EqualFold(identitySystem, ADFSIdentitySystem) {
 | 
			
		||||
		env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, "/")
 | 
			
		||||
		env.ActiveDirectoryEndpoint = strings.TrimSuffix(env.ActiveDirectoryEndpoint, "adfs")
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// checkConfigWhenNetworkResourceInDifferentTenant checks configuration for the scenario of using network resource in different tenant
 | 
			
		||||
func (config *AzureAuthConfig) checkConfigWhenNetworkResourceInDifferentTenant() error {
 | 
			
		||||
	if !config.UsesNetworkResourceInDifferentTenant() {
 | 
			
		||||
		return fmt.Errorf("NetworkResourceTenantID and NetworkResourceSubscriptionID must be configured")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if strings.EqualFold(config.IdentitySystem, ADFSIdentitySystem) {
 | 
			
		||||
		return fmt.Errorf("ADFS identity system is not supported")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if config.UseManagedIdentityExtension {
 | 
			
		||||
		return fmt.Errorf("managed identity is not supported")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
@@ -1,176 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package auth
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/adal"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	CrossTenantNetworkResourceNegativeConfig = []*AzureAuthConfig{
 | 
			
		||||
		{
 | 
			
		||||
			TenantID:        "TenantID",
 | 
			
		||||
			AADClientID:     "AADClientID",
 | 
			
		||||
			AADClientSecret: "AADClientSecret",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			TenantID:                      "TenantID",
 | 
			
		||||
			AADClientID:                   "AADClientID",
 | 
			
		||||
			AADClientSecret:               "AADClientSecret",
 | 
			
		||||
			NetworkResourceTenantID:       "NetworkResourceTenantID",
 | 
			
		||||
			NetworkResourceSubscriptionID: "NetworkResourceSubscriptionID",
 | 
			
		||||
			IdentitySystem:                ADFSIdentitySystem,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			TenantID:                      "TenantID",
 | 
			
		||||
			AADClientID:                   "AADClientID",
 | 
			
		||||
			AADClientSecret:               "AADClientSecret",
 | 
			
		||||
			NetworkResourceTenantID:       "NetworkResourceTenantID",
 | 
			
		||||
			NetworkResourceSubscriptionID: "NetworkResourceSubscriptionID",
 | 
			
		||||
			UseManagedIdentityExtension:   true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestGetServicePrincipalToken(t *testing.T) {
 | 
			
		||||
	config := &AzureAuthConfig{
 | 
			
		||||
		TenantID:        "TenantID",
 | 
			
		||||
		AADClientID:     "AADClientID",
 | 
			
		||||
		AADClientSecret: "AADClientSecret",
 | 
			
		||||
	}
 | 
			
		||||
	env := &azure.PublicCloud
 | 
			
		||||
 | 
			
		||||
	token, err := GetServicePrincipalToken(config, env)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, config.TenantID, nil)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	spt, err := adal.NewServicePrincipalToken(*oauthConfig, config.AADClientID, config.AADClientSecret, env.ServiceManagementEndpoint)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	assert.Equal(t, token, spt)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetMultiTenantServicePrincipalToken(t *testing.T) {
 | 
			
		||||
	config := &AzureAuthConfig{
 | 
			
		||||
		TenantID:                      "TenantID",
 | 
			
		||||
		AADClientID:                   "AADClientID",
 | 
			
		||||
		AADClientSecret:               "AADClientSecret",
 | 
			
		||||
		NetworkResourceTenantID:       "NetworkResourceTenantID",
 | 
			
		||||
		NetworkResourceSubscriptionID: "NetworkResourceSubscriptionID",
 | 
			
		||||
	}
 | 
			
		||||
	env := &azure.PublicCloud
 | 
			
		||||
 | 
			
		||||
	multiTenantToken, err := GetMultiTenantServicePrincipalToken(config, env)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	multiTenantOAuthConfig, err := adal.NewMultiTenantOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID, []string{config.NetworkResourceTenantID}, adal.OAuthOptions{})
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	spt, err := adal.NewMultiTenantServicePrincipalToken(multiTenantOAuthConfig, config.AADClientID, config.AADClientSecret, env.ServiceManagementEndpoint)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	assert.Equal(t, multiTenantToken, spt)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetMultiTenantServicePrincipalTokenNegative(t *testing.T) {
 | 
			
		||||
	env := &azure.PublicCloud
 | 
			
		||||
	for _, config := range CrossTenantNetworkResourceNegativeConfig {
 | 
			
		||||
		_, err := GetMultiTenantServicePrincipalToken(config, env)
 | 
			
		||||
		assert.Error(t, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetNetworkResourceServicePrincipalToken(t *testing.T) {
 | 
			
		||||
	config := &AzureAuthConfig{
 | 
			
		||||
		TenantID:                      "TenantID",
 | 
			
		||||
		AADClientID:                   "AADClientID",
 | 
			
		||||
		AADClientSecret:               "AADClientSecret",
 | 
			
		||||
		NetworkResourceTenantID:       "NetworkResourceTenantID",
 | 
			
		||||
		NetworkResourceSubscriptionID: "NetworkResourceSubscriptionID",
 | 
			
		||||
	}
 | 
			
		||||
	env := &azure.PublicCloud
 | 
			
		||||
 | 
			
		||||
	token, err := GetNetworkResourceServicePrincipalToken(config, env)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	oauthConfig, err := adal.NewOAuthConfigWithAPIVersion(env.ActiveDirectoryEndpoint, config.NetworkResourceTenantID, nil)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	spt, err := adal.NewServicePrincipalToken(*oauthConfig, config.AADClientID, config.AADClientSecret, env.ServiceManagementEndpoint)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	assert.Equal(t, token, spt)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetNetworkResourceServicePrincipalTokenNegative(t *testing.T) {
 | 
			
		||||
	env := &azure.PublicCloud
 | 
			
		||||
	for _, config := range CrossTenantNetworkResourceNegativeConfig {
 | 
			
		||||
		_, err := GetNetworkResourceServicePrincipalToken(config, env)
 | 
			
		||||
		assert.Error(t, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestParseAzureEnvironment(t *testing.T) {
 | 
			
		||||
	cases := []struct {
 | 
			
		||||
		cloudName               string
 | 
			
		||||
		resourceManagerEndpoint string
 | 
			
		||||
		identitySystem          string
 | 
			
		||||
		expected                *azure.Environment
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			cloudName:               "",
 | 
			
		||||
			resourceManagerEndpoint: "",
 | 
			
		||||
			identitySystem:          "",
 | 
			
		||||
			expected:                &azure.PublicCloud,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			cloudName:               "AZURECHINACLOUD",
 | 
			
		||||
			resourceManagerEndpoint: "",
 | 
			
		||||
			identitySystem:          "",
 | 
			
		||||
			expected:                &azure.ChinaCloud,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, c := range cases {
 | 
			
		||||
		env, err := ParseAzureEnvironment(c.cloudName, c.resourceManagerEndpoint, c.identitySystem)
 | 
			
		||||
		assert.NoError(t, err)
 | 
			
		||||
		assert.Equal(t, c.expected, env)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestAzureStackOverrides(t *testing.T) {
 | 
			
		||||
	env := &azure.PublicCloud
 | 
			
		||||
	resourceManagerEndpoint := "https://management.test.com/"
 | 
			
		||||
 | 
			
		||||
	azureStackOverrides(env, resourceManagerEndpoint, "")
 | 
			
		||||
	assert.Equal(t, env.ManagementPortalURL, "https://portal.test.com/")
 | 
			
		||||
	assert.Equal(t, env.ServiceManagementEndpoint, env.TokenAudience)
 | 
			
		||||
	assert.Equal(t, env.ResourceManagerVMDNSSuffix, "cloudapp.test.com")
 | 
			
		||||
	assert.Equal(t, env.ActiveDirectoryEndpoint, "https://login.microsoftonline.com/")
 | 
			
		||||
 | 
			
		||||
	azureStackOverrides(env, resourceManagerEndpoint, "adfs")
 | 
			
		||||
	assert.Equal(t, env.ManagementPortalURL, "https://portal.test.com/")
 | 
			
		||||
	assert.Equal(t, env.ServiceManagementEndpoint, env.TokenAudience)
 | 
			
		||||
	assert.Equal(t, env.ResourceManagerVMDNSSuffix, "cloudapp.test.com")
 | 
			
		||||
	assert.Equal(t, env.ActiveDirectoryEndpoint, "https://login.microsoftonline.com")
 | 
			
		||||
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package auth provides a general library to authorize Azure ARM clients.
 | 
			
		||||
package auth // import "k8s.io/legacy-cloud-providers/azure/auth"
 | 
			
		||||
@@ -1,988 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/adal"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/sets"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	"k8s.io/client-go/informers"
 | 
			
		||||
	clientset "k8s.io/client-go/kubernetes"
 | 
			
		||||
	"k8s.io/client-go/kubernetes/scheme"
 | 
			
		||||
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 | 
			
		||||
	"k8s.io/client-go/tools/cache"
 | 
			
		||||
	"k8s.io/client-go/tools/record"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/auth"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/diskclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/fileclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/publicipclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/routeclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/routetableclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/snapshotclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/subnetclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmsizeclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
 | 
			
		||||
	// ensure the newly added package from azure-sdk-for-go is in vendor/
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/azure/clients/containerserviceclient"
 | 
			
		||||
	// ensure the newly added package from azure-sdk-for-go is in vendor/
 | 
			
		||||
	_ "k8s.io/legacy-cloud-providers/azure/clients/deploymentclient"
 | 
			
		||||
 | 
			
		||||
	"sigs.k8s.io/yaml"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
	// CloudProviderName is the value used for the --cloud-provider flag
	CloudProviderName = "azure"
	// AzureStackCloudName is the cloud name of Azure Stack
	AzureStackCloudName = "AZURESTACKCLOUD"

	// Default rate-limiting and exponential-backoff parameters applied when
	// the cloud config does not override them.
	rateLimitQPSDefault    = 1.0
	rateLimitBucketDefault = 5
	backoffRetriesDefault  = 6
	backoffExponentDefault = 1.5
	backoffDurationDefault = 5 // in seconds
	backoffJitterDefault   = 1.0
	// According to https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#load-balancer.
	maximumLoadBalancerRuleCount = 250

	// Supported values for the vmType config field.
	vmTypeVMSS     = "vmss"
	vmTypeStandard = "standard"

	// Supported values for the loadBalancerSku config field.
	loadBalancerSkuBasic    = "basic"
	loadBalancerSkuStandard = "standard"

	// Node labels consumed by this provider.
	externalResourceGroupLabel = "kubernetes.azure.com/resource-group"
	managedByAzureLabel        = "kubernetes.azure.com/managed"
)
 | 
			
		||||
 | 
			
		||||
// Candidate values for the preConfiguredBackendPoolLoadBalancerTypes config
// field (see Config.PreConfiguredBackendPoolLoadBalancerTypes).
const (
	// PreConfiguredBackendPoolLoadBalancerTypesNone means that the load balancers are not pre-configured
	PreConfiguredBackendPoolLoadBalancerTypesNone = ""
	// PreConfiguredBackendPoolLoadBalancerTypesInternal means that the `internal` load balancers are pre-configured
	PreConfiguredBackendPoolLoadBalancerTypesInternal = "internal"
	// PreConfiguredBackendPoolLoadBalancerTypesExternal means that the `external` load balancers are pre-configured
	PreConfiguredBackendPoolLoadBalancerTypesExternal = "external"
	// PreConfiguredBackendPoolLoadBalancerTypesAll means that all load balancers are pre-configured
	PreConfiguredBackendPoolLoadBalancerTypesAll = "all"
)
 | 
			
		||||
 | 
			
		||||
// Defaults applied when the corresponding Config pointers are left nil.
var (
	// Master nodes are not added to standard load balancer by default.
	defaultExcludeMasterFromStandardLB = true
	// Outbound SNAT is enabled by default.
	defaultDisableOutboundSNAT = false
)
 | 
			
		||||
 | 
			
		||||
// Config holds the configuration parsed from the --cloud-config flag
 | 
			
		||||
// All fields are required unless otherwise specified
 | 
			
		||||
// NOTE: Cloud config files should follow the same Kubernetes deprecation policy as
 | 
			
		||||
// flags or CLIs. Config fields should not change behavior in incompatible ways and
 | 
			
		||||
// should be deprecated for at least 2 release prior to removing.
 | 
			
		||||
// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli
 | 
			
		||||
// for more details.
 | 
			
		||||
// Config holds the configuration parsed from the --cloud-config flag
// All fields are required unless otherwise specified
// NOTE: Cloud config files should follow the same Kubernetes deprecation policy as
// flags or CLIs. Config fields should not change behavior in incompatible ways and
// should be deprecated for at least 2 release prior to removing.
// See https://kubernetes.io/docs/reference/using-api/deprecation-policy/#deprecating-a-flag-or-cli
// for more details.
type Config struct {
	// Embedded authentication and rate-limiting configuration; their fields
	// are flattened into this config when parsed.
	auth.AzureAuthConfig
	CloudProviderRateLimitConfig

	// The name of the resource group that the cluster is deployed in
	ResourceGroup string `json:"resourceGroup,omitempty" yaml:"resourceGroup,omitempty"`
	// The location of the resource group that the cluster is deployed in
	Location string `json:"location,omitempty" yaml:"location,omitempty"`
	// The name of the VNet that the cluster is deployed in
	VnetName string `json:"vnetName,omitempty" yaml:"vnetName,omitempty"`
	// The name of the resource group that the Vnet is deployed in
	VnetResourceGroup string `json:"vnetResourceGroup,omitempty" yaml:"vnetResourceGroup,omitempty"`
	// The name of the subnet that the cluster is deployed in
	SubnetName string `json:"subnetName,omitempty" yaml:"subnetName,omitempty"`
	// The name of the security group attached to the cluster's subnet
	SecurityGroupName string `json:"securityGroupName,omitempty" yaml:"securityGroupName,omitempty"`
	// The name of the resource group that the security group is deployed in
	SecurityGroupResourceGroup string `json:"securityGroupResourceGroup,omitempty" yaml:"securityGroupResourceGroup,omitempty"`
	// (Optional in 1.6) The name of the route table attached to the subnet that the cluster is deployed in
	RouteTableName string `json:"routeTableName,omitempty" yaml:"routeTableName,omitempty"`
	// The name of the resource group that the RouteTable is deployed in
	RouteTableResourceGroup string `json:"routeTableResourceGroup,omitempty" yaml:"routeTableResourceGroup,omitempty"`
	// (Optional) The name of the availability set that should be used as the load balancer backend
	// If this is set, the Azure cloudprovider will only add nodes from that availability set to the load
	// balancer backend pool. If this is not set, and multiple agent pools (availability sets) are used, then
	// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
	// In other words, if you use multiple agent pools (availability sets), you MUST set this field.
	PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName,omitempty" yaml:"primaryAvailabilitySetName,omitempty"`
	// The type of azure nodes. Candidate values are: vmss and standard.
	// If not set, it will be default to standard.
	VMType string `json:"vmType,omitempty" yaml:"vmType,omitempty"`
	// The name of the scale set that should be used as the load balancer backend.
	// If this is set, the Azure cloudprovider will only add nodes from that scale set to the load
	// balancer backend pool. If this is not set, and multiple agent pools (scale sets) are used, then
	// the cloudprovider will try to add all nodes to a single backend pool which is forbidden.
	// In other words, if you use multiple agent pools (scale sets), you MUST set this field.
	PrimaryScaleSetName string `json:"primaryScaleSetName,omitempty" yaml:"primaryScaleSetName,omitempty"`
	// Enable exponential backoff to manage resource request retries
	CloudProviderBackoff bool `json:"cloudProviderBackoff,omitempty" yaml:"cloudProviderBackoff,omitempty"`
	// Backoff retry limit
	CloudProviderBackoffRetries int `json:"cloudProviderBackoffRetries,omitempty" yaml:"cloudProviderBackoffRetries,omitempty"`
	// Backoff exponent
	CloudProviderBackoffExponent float64 `json:"cloudProviderBackoffExponent,omitempty" yaml:"cloudProviderBackoffExponent,omitempty"`
	// Backoff duration
	CloudProviderBackoffDuration int `json:"cloudProviderBackoffDuration,omitempty" yaml:"cloudProviderBackoffDuration,omitempty"`
	// Backoff jitter
	CloudProviderBackoffJitter float64 `json:"cloudProviderBackoffJitter,omitempty" yaml:"cloudProviderBackoffJitter,omitempty"`
	// Use instance metadata service where possible
	UseInstanceMetadata bool `json:"useInstanceMetadata,omitempty" yaml:"useInstanceMetadata,omitempty"`

	// Sku of Load Balancer and Public IP. Candidate values are: basic and standard.
	// If not set, it will be default to basic.
	LoadBalancerSku string `json:"loadBalancerSku,omitempty" yaml:"loadBalancerSku,omitempty"`
	// ExcludeMasterFromStandardLB excludes master nodes from standard load balancer.
	// If not set, it will be default to true.
	ExcludeMasterFromStandardLB *bool `json:"excludeMasterFromStandardLB,omitempty" yaml:"excludeMasterFromStandardLB,omitempty"`
	// DisableOutboundSNAT disables the outbound SNAT for public load balancer rules.
	// It should only be set when loadBalancerSku is standard. If not set, it will be default to false.
	DisableOutboundSNAT *bool `json:"disableOutboundSNAT,omitempty" yaml:"disableOutboundSNAT,omitempty"`

	// Maximum allowed LoadBalancer Rule Count is the limit enforced by Azure Load balancer
	MaximumLoadBalancerRuleCount int `json:"maximumLoadBalancerRuleCount,omitempty" yaml:"maximumLoadBalancerRuleCount,omitempty"`

	// The cloud configure type for Azure cloud provider. Supported values are file, secret and merge.
	CloudConfigType cloudConfigType `json:"cloudConfigType,omitempty" yaml:"cloudConfigType,omitempty"`

	// LoadBalancerName determines the specific name of the load balancer user want to use, working with
	// LoadBalancerResourceGroup
	LoadBalancerName string `json:"loadBalancerName,omitempty" yaml:"loadBalancerName,omitempty"`
	// LoadBalancerResourceGroup determines the specific resource group of the load balancer user want to use, working
	// with LoadBalancerName
	LoadBalancerResourceGroup string `json:"loadBalancerResourceGroup,omitempty" yaml:"loadBalancerResourceGroup,omitempty"`
	// PreConfiguredBackendPoolLoadBalancerTypes determines whether the LoadBalancer BackendPool has been preconfigured.
	// Candidate values are:
	//   "": exactly with today (not pre-configured for any LBs)
	//   "internal": for internal LoadBalancer
	//   "external": for external LoadBalancer
	//   "all": for both internal and external LoadBalancer
	PreConfiguredBackendPoolLoadBalancerTypes string `json:"preConfiguredBackendPoolLoadBalancerTypes,omitempty" yaml:"preConfiguredBackendPoolLoadBalancerTypes,omitempty"`
	// EnableMultipleStandardLoadBalancers determines the behavior of the standard load balancer. If set to true
	// there would be one standard load balancer per VMAS or VMSS, which is similar with the behavior of the basic
	// load balancer. Users could select the specific standard load balancer for their service by the service
	// annotation `service.beta.kubernetes.io/azure-load-balancer-mode`, If set to false, the same standard load balancer
	// would be shared by all services in the cluster. In this case, the mode selection annotation would be ignored.
	EnableMultipleStandardLoadBalancers bool `json:"enableMultipleStandardLoadBalancers,omitempty" yaml:"enableMultipleStandardLoadBalancers,omitempty"`

	// AvailabilitySetNodesCacheTTLInSeconds sets the Cache TTL for availabilitySetNodesCache
	// if not set, will use default value
	AvailabilitySetNodesCacheTTLInSeconds int `json:"availabilitySetNodesCacheTTLInSeconds,omitempty" yaml:"availabilitySetNodesCacheTTLInSeconds,omitempty"`
	// VmssCacheTTLInSeconds sets the cache TTL for VMSS
	VmssCacheTTLInSeconds int `json:"vmssCacheTTLInSeconds,omitempty" yaml:"vmssCacheTTLInSeconds,omitempty"`
	// VmssVirtualMachinesCacheTTLInSeconds sets the cache TTL for vmssVirtualMachines
	VmssVirtualMachinesCacheTTLInSeconds int `json:"vmssVirtualMachinesCacheTTLInSeconds,omitempty" yaml:"vmssVirtualMachinesCacheTTLInSeconds,omitempty"`
	// VmCacheTTLInSeconds sets the cache TTL for vm
	VMCacheTTLInSeconds int `json:"vmCacheTTLInSeconds,omitempty" yaml:"vmCacheTTLInSeconds,omitempty"`
	// LoadBalancerCacheTTLInSeconds sets the cache TTL for load balancer
	LoadBalancerCacheTTLInSeconds int `json:"loadBalancerCacheTTLInSeconds,omitempty" yaml:"loadBalancerCacheTTLInSeconds,omitempty"`
	// NsgCacheTTLInSeconds sets the cache TTL for network security group
	NsgCacheTTLInSeconds int `json:"nsgCacheTTLInSeconds,omitempty" yaml:"nsgCacheTTLInSeconds,omitempty"`
	// RouteTableCacheTTLInSeconds sets the cache TTL for route table
	RouteTableCacheTTLInSeconds int `json:"routeTableCacheTTLInSeconds,omitempty" yaml:"routeTableCacheTTLInSeconds,omitempty"`

	// DisableAvailabilitySetNodes disables VMAS nodes support when "VMType" is set to "vmss".
	DisableAvailabilitySetNodes bool `json:"disableAvailabilitySetNodes,omitempty" yaml:"disableAvailabilitySetNodes,omitempty"`

	// Tags determines what tags shall be applied to the shared resources managed by controller manager, which
	// includes load balancer, security group and route table. The supported format is `a=b,c=d,...`. After updated
	// this config, the old tags would be replaced by the new ones.
	Tags string `json:"tags,omitempty" yaml:"tags,omitempty"`
}
 | 
			
		||||
 | 
			
		||||
// Compile-time assertions that *Cloud satisfies every cloud-provider
// interface this package is expected to implement.
var (
	_ cloudprovider.Interface    = (*Cloud)(nil)
	_ cloudprovider.Instances    = (*Cloud)(nil)
	_ cloudprovider.LoadBalancer = (*Cloud)(nil)
	_ cloudprovider.Routes       = (*Cloud)(nil)
	_ cloudprovider.Zones        = (*Cloud)(nil)
	_ cloudprovider.PVLabeler    = (*Cloud)(nil)
)
 | 
			
		||||
 | 
			
		||||
// Cloud holds the config and clients
 | 
			
		||||
// Cloud holds the config and clients
type Cloud struct {
	Config
	Environment azure.Environment

	// Per-resource ARM clients used to manage the cluster's Azure resources.
	RoutesClient                    routeclient.Interface
	SubnetsClient                   subnetclient.Interface
	InterfacesClient                interfaceclient.Interface
	RouteTablesClient               routetableclient.Interface
	LoadBalancerClient              loadbalancerclient.Interface
	PublicIPAddressesClient         publicipclient.Interface
	SecurityGroupsClient            securitygroupclient.Interface
	VirtualMachinesClient           vmclient.Interface
	StorageAccountClient            storageaccountclient.Interface
	DisksClient                     diskclient.Interface
	SnapshotsClient                 snapshotclient.Interface
	FileClient                      fileclient.Interface
	VirtualMachineScaleSetsClient   vmssclient.Interface
	VirtualMachineScaleSetVMsClient vmssvmclient.Interface
	VirtualMachineSizesClient       vmsizeclient.Interface

	// ResourceRequestBackoff is the backoff policy applied to resource requests.
	ResourceRequestBackoff wait.Backoff
	metadata               *InstanceMetadataService
	VMSet                  VMSet

	// ipv6DualStack allows overriding for unit testing.  It's normally initialized from featuregates
	ipv6DualStackEnabled bool
	// Lock for access to node caches, includes nodeZones, nodeResourceGroups, and unmanagedNodes.
	nodeCachesLock sync.RWMutex
	// nodeNames holds current nodes for tracking added nodes in VM caches.
	nodeNames sets.String
	// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
	// it is updated by the nodeInformer
	nodeZones map[string]sets.String
	// nodeResourceGroups holds nodes external resource groups
	nodeResourceGroups map[string]string
	// unmanagedNodes holds a list of nodes not managed by Azure cloud provider.
	unmanagedNodes sets.String
	// excludeLoadBalancerNodes holds a list of nodes that should be excluded from LoadBalancer.
	excludeLoadBalancerNodes sets.String
	// nodeInformerSynced is for determining if the informer has synced.
	nodeInformerSynced cache.InformerSynced

	// routeCIDRsLock holds lock for routeCIDRs cache.
	routeCIDRsLock sync.Mutex
	// routeCIDRs holds cache for route CIDRs.
	routeCIDRs map[string]string

	// Kubernetes API access and event recording.
	KubeClient       clientset.Interface
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder
	routeUpdater     *delayedRouteUpdater

	// Timed caches for frequently read Azure resources.
	vmCache  *azcache.TimedCache
	lbCache  *azcache.TimedCache
	nsgCache *azcache.TimedCache
	rtCache  *azcache.TimedCache

	// Embedded disk controllers; their methods are promoted onto Cloud.
	*BlobDiskController
	*ManagedDiskController
	*controllerCommon
}
 | 
			
		||||
 | 
			
		||||
func init() {
 | 
			
		||||
	// In go-autorest SDK https://github.com/Azure/go-autorest/blob/master/autorest/sender.go#L258-L287,
 | 
			
		||||
	// if ARM returns http.StatusTooManyRequests, the sender doesn't increase the retry attempt count,
 | 
			
		||||
	// hence the Azure clients will keep retrying forever until it get a status code other than 429.
 | 
			
		||||
	// So we explicitly removes http.StatusTooManyRequests from autorest.StatusCodesForRetry.
 | 
			
		||||
	// Refer https://github.com/Azure/go-autorest/issues/398.
 | 
			
		||||
	// TODO(feiskyer): Use autorest.SendDecorator to customize the retry policy when new Azure SDK is available.
 | 
			
		||||
	statusCodesForRetry := make([]int, 0)
 | 
			
		||||
	for _, code := range autorest.StatusCodesForRetry {
 | 
			
		||||
		if code != http.StatusTooManyRequests {
 | 
			
		||||
			statusCodesForRetry = append(statusCodesForRetry, code)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	autorest.StatusCodesForRetry = statusCodesForRetry
 | 
			
		||||
 | 
			
		||||
	cloudprovider.RegisterCloudProvider(CloudProviderName, NewCloud)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewCloud returns a Cloud with initialized clients
 | 
			
		||||
func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
 | 
			
		||||
	az, err := NewCloudWithoutFeatureGates(configReader)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	az.ipv6DualStackEnabled = true
 | 
			
		||||
 | 
			
		||||
	return az, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewCloudWithoutFeatureGates returns a Cloud without trying to wire the feature gates.  This is used by the unit tests
 | 
			
		||||
// that don't load the actual features being used in the cluster.
 | 
			
		||||
func NewCloudWithoutFeatureGates(configReader io.Reader) (*Cloud, error) {
 | 
			
		||||
	config, err := parseConfig(configReader)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az := &Cloud{
 | 
			
		||||
		nodeNames:                sets.NewString(),
 | 
			
		||||
		nodeZones:                map[string]sets.String{},
 | 
			
		||||
		nodeResourceGroups:       map[string]string{},
 | 
			
		||||
		unmanagedNodes:           sets.NewString(),
 | 
			
		||||
		excludeLoadBalancerNodes: sets.NewString(),
 | 
			
		||||
		routeCIDRs:               map[string]string{},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	err = az.InitializeCloudFromConfig(config, false)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InitializeCloudFromConfig initializes the Cloud from config.
//
// It defaults unset config fields, validates the combination of options,
// acquires service principal tokens, wires up all Azure SDK clients, the
// VMSet implementation, the resource caches, the disk controllers, and
// starts the background route updater. fromSecret indicates whether the
// config came from a Kubernetes secret rather than the cloud-config file;
// in that case credentials are mandatory.
func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) error {
	// cloud-config not set, return nil so that it would be initialized from secret.
	if config == nil {
		klog.Warning("cloud-config is not provided, Azure cloud provider would be initialized from secret")
		return nil
	}

	// Route table and security group resource groups default to the cluster resource group.
	if config.RouteTableResourceGroup == "" {
		config.RouteTableResourceGroup = config.ResourceGroup
	}

	if config.SecurityGroupResourceGroup == "" {
		config.SecurityGroupResourceGroup = config.ResourceGroup
	}

	if config.VMType == "" {
		// default to standard vmType if not set.
		config.VMType = vmTypeStandard
	}

	// disableAvailabilitySetNodes only makes sense when every node is a VMSS instance.
	if config.DisableAvailabilitySetNodes && config.VMType != vmTypeVMSS {
		return fmt.Errorf("disableAvailabilitySetNodes %v is only supported when vmType is 'vmss'", config.DisableAvailabilitySetNodes)
	}

	if config.CloudConfigType == "" {
		// The default cloud config type is cloudConfigTypeMerge.
		config.CloudConfigType = cloudConfigTypeMerge
	} else {
		supportedCloudConfigTypes := sets.NewString(
			string(cloudConfigTypeMerge),
			string(cloudConfigTypeFile),
			string(cloudConfigTypeSecret))
		if !supportedCloudConfigTypes.Has(string(config.CloudConfigType)) {
			return fmt.Errorf("cloudConfigType %v is not supported, supported values are %v", config.CloudConfigType, supportedCloudConfigTypes.List())
		}
	}

	env, err := auth.ParseAzureEnvironment(config.Cloud, config.ResourceManagerEndpoint, config.IdentitySystem)
	if err != nil {
		return err
	}

	servicePrincipalToken, err := auth.GetServicePrincipalToken(&config.AzureAuthConfig, env)
	if err == auth.ErrorNoAuth {
		// Only controller-manager would lazy-initialize from secret, and credentials are required for such case.
		if fromSecret {
			err := fmt.Errorf("no credentials provided for Azure cloud provider")
			klog.Fatalf("%v", err)
			return err
		}

		// No credentials provided, useInstanceMetadata should be enabled for Kubelet.
		// TODO(feiskyer): print different error message for Kubelet and controller-manager, as they're
		// requiring different credential settings.
		if !config.UseInstanceMetadata && config.CloudConfigType == cloudConfigTypeFile {
			return fmt.Errorf("useInstanceMetadata must be enabled without Azure credentials")
		}

		klog.V(2).Infof("Azure cloud provider is starting without credentials")
	} else if err != nil {
		return err
	}

	// Initialize rate limiting config options.
	InitializeCloudProviderRateLimitConfig(&config.CloudProviderRateLimitConfig)

	// Conditionally configure resource request backoff
	resourceRequestBackoff := wait.Backoff{
		Steps: 1,
	}
	if config.CloudProviderBackoff {
		// Assign backoff defaults if no configuration was passed in
		if config.CloudProviderBackoffRetries == 0 {
			config.CloudProviderBackoffRetries = backoffRetriesDefault
		}
		if config.CloudProviderBackoffDuration == 0 {
			config.CloudProviderBackoffDuration = backoffDurationDefault
		}
		if config.CloudProviderBackoffExponent == 0 {
			config.CloudProviderBackoffExponent = backoffExponentDefault
		}

		if config.CloudProviderBackoffJitter == 0 {
			config.CloudProviderBackoffJitter = backoffJitterDefault
		}

		resourceRequestBackoff = wait.Backoff{
			Steps:    config.CloudProviderBackoffRetries,
			Factor:   config.CloudProviderBackoffExponent,
			Duration: time.Duration(config.CloudProviderBackoffDuration) * time.Second,
			Jitter:   config.CloudProviderBackoffJitter,
		}
		klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f",
			config.CloudProviderBackoffRetries,
			config.CloudProviderBackoffExponent,
			config.CloudProviderBackoffDuration,
			config.CloudProviderBackoffJitter)
	} else {
		// CloudProviderBackoffRetries will be set to 1 by default as the requirements of Azure SDK.
		config.CloudProviderBackoffRetries = 1
		config.CloudProviderBackoffDuration = backoffDurationDefault
	}

	if strings.EqualFold(config.LoadBalancerSku, loadBalancerSkuStandard) {
		// Do not add master nodes to standard LB by default.
		if config.ExcludeMasterFromStandardLB == nil {
			config.ExcludeMasterFromStandardLB = &defaultExcludeMasterFromStandardLB
		}

		// Enable outbound SNAT by default.
		if config.DisableOutboundSNAT == nil {
			config.DisableOutboundSNAT = &defaultDisableOutboundSNAT
		}
	} else {
		// disableOutboundSNAT is only meaningful for the standard LB SKU.
		if config.DisableOutboundSNAT != nil && *config.DisableOutboundSNAT {
			return fmt.Errorf("disableOutboundSNAT should only set when loadBalancerSku is standard")
		}
	}

	// Commit the (now fully defaulted) config and environment before wiring clients.
	az.Config = *config
	az.Environment = *env
	az.ResourceRequestBackoff = resourceRequestBackoff
	az.metadata, err = NewInstanceMetadataService(imdsServer)
	if err != nil {
		return err
	}

	// No credentials provided, InstanceMetadataService would be used for getting Azure resources.
	// Note that this only applies to Kubelet, controller-manager should configure credentials for managing Azure resources.
	if servicePrincipalToken == nil {
		return nil
	}

	// If uses network resources in different AAD Tenant, then prepare corresponding Service Principal Token for VM/VMSS client and network resources client
	var multiTenantServicePrincipalToken *adal.MultiTenantServicePrincipalToken
	var networkResourceServicePrincipalToken *adal.ServicePrincipalToken
	if az.Config.UsesNetworkResourceInDifferentTenant() {
		multiTenantServicePrincipalToken, err = auth.GetMultiTenantServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
		if err != nil {
			return err
		}
		networkResourceServicePrincipalToken, err = auth.GetNetworkResourceServicePrincipalToken(&az.Config.AzureAuthConfig, &az.Environment)
		if err != nil {
			return err
		}
	}

	az.configAzureClients(servicePrincipalToken, multiTenantServicePrincipalToken, networkResourceServicePrincipalToken)

	if az.MaximumLoadBalancerRuleCount == 0 {
		az.MaximumLoadBalancerRuleCount = maximumLoadBalancerRuleCount
	}

	// Select the VMSet implementation matching the configured vmType.
	if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
		az.VMSet, err = newScaleSet(az)
		if err != nil {
			return err
		}
	} else {
		az.VMSet = newAvailabilitySet(az)
	}

	// Build the per-resource caches used to reduce ARM API calls.
	az.vmCache, err = az.newVMCache()
	if err != nil {
		return err
	}

	az.lbCache, err = az.newLBCache()
	if err != nil {
		return err
	}

	az.nsgCache, err = az.newNSGCache()
	if err != nil {
		return err
	}

	az.rtCache, err = az.newRouteTableCache()
	if err != nil {
		return err
	}

	if err := initDiskControllers(az); err != nil {
		return err
	}

	// start delayed route updater.
	az.routeUpdater = newDelayedRouteUpdater(az, routeUpdateInterval)
	go az.routeUpdater.run()

	return nil
}
 | 
			
		||||
 | 
			
		||||
// configAzureClients builds per-resource client configurations (each with its
// own rate limiter derived from the shared base config) and instantiates every
// Azure SDK client on the Cloud. When cross-tenant network resources are
// configured, the multi-tenant token overrides the authorizer for compute
// clients, and the network-resource token overrides both the authorizer and
// the subscription ID for networking clients.
func (az *Cloud) configAzureClients(
	servicePrincipalToken *adal.ServicePrincipalToken,
	multiTenantServicePrincipalToken *adal.MultiTenantServicePrincipalToken,
	networkResourceServicePrincipalToken *adal.ServicePrincipalToken) {
	azClientConfig := az.getAzureClientConfig(servicePrincipalToken)

	// Prepare AzureClientConfig for all azure clients
	interfaceClientConfig := azClientConfig.WithRateLimiter(az.Config.InterfaceRateLimit)
	vmSizeClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineSizeRateLimit)
	snapshotClientConfig := azClientConfig.WithRateLimiter(az.Config.SnapshotRateLimit)
	storageAccountClientConfig := azClientConfig.WithRateLimiter(az.Config.StorageAccountRateLimit)
	diskClientConfig := azClientConfig.WithRateLimiter(az.Config.DiskRateLimit)
	vmClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineRateLimit)
	vmssClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineScaleSetRateLimit)
	// Error "not an active Virtual Machine Scale Set VM" is not retriable for VMSS VM.
	// But http.StatusNotFound is retriable because of ARM replication latency.
	vmssVMClientConfig := azClientConfig.WithRateLimiter(az.Config.VirtualMachineScaleSetRateLimit)
	vmssVMClientConfig.Backoff = vmssVMClientConfig.Backoff.WithNonRetriableErrors([]string{vmssVMNotActiveErrorMessage}).WithRetriableHTTPStatusCodes([]int{http.StatusNotFound})
	routeClientConfig := azClientConfig.WithRateLimiter(az.Config.RouteRateLimit)
	subnetClientConfig := azClientConfig.WithRateLimiter(az.Config.SubnetsRateLimit)
	routeTableClientConfig := azClientConfig.WithRateLimiter(az.Config.RouteTableRateLimit)
	loadBalancerClientConfig := azClientConfig.WithRateLimiter(az.Config.LoadBalancerRateLimit)
	securityGroupClientConfig := azClientConfig.WithRateLimiter(az.Config.SecurityGroupRateLimit)
	publicIPClientConfig := azClientConfig.WithRateLimiter(az.Config.PublicIPAddressRateLimit)
	// TODO(ZeroMagic): add azurefileRateLimit
	fileClientConfig := azClientConfig.WithRateLimiter(nil)

	// If uses network resources in different AAD Tenant, update Authorizer for VM/VMSS client config
	if multiTenantServicePrincipalToken != nil {
		multiTenantServicePrincipalTokenAuthorizer := autorest.NewMultiTenantServicePrincipalTokenAuthorizer(multiTenantServicePrincipalToken)
		vmClientConfig.Authorizer = multiTenantServicePrincipalTokenAuthorizer
		vmssClientConfig.Authorizer = multiTenantServicePrincipalTokenAuthorizer
		vmssVMClientConfig.Authorizer = multiTenantServicePrincipalTokenAuthorizer
	}

	// If uses network resources in different AAD Tenant, update SubscriptionID and Authorizer for network resources client config
	if networkResourceServicePrincipalToken != nil {
		networkResourceServicePrincipalTokenAuthorizer := autorest.NewBearerAuthorizer(networkResourceServicePrincipalToken)
		routeClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		subnetClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		routeTableClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		loadBalancerClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		securityGroupClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer
		publicIPClientConfig.Authorizer = networkResourceServicePrincipalTokenAuthorizer

		routeClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		subnetClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		routeTableClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		loadBalancerClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		securityGroupClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
		publicIPClientConfig.SubscriptionID = az.Config.NetworkResourceSubscriptionID
	}

	// Initialize all azure clients based on client config
	az.InterfacesClient = interfaceclient.New(interfaceClientConfig)
	az.VirtualMachineSizesClient = vmsizeclient.New(vmSizeClientConfig)
	az.SnapshotsClient = snapshotclient.New(snapshotClientConfig)
	az.StorageAccountClient = storageaccountclient.New(storageAccountClientConfig)
	az.DisksClient = diskclient.New(diskClientConfig)
	az.VirtualMachinesClient = vmclient.New(vmClientConfig)
	az.VirtualMachineScaleSetsClient = vmssclient.New(vmssClientConfig)
	az.VirtualMachineScaleSetVMsClient = vmssvmclient.New(vmssVMClientConfig)
	az.RoutesClient = routeclient.New(routeClientConfig)
	az.SubnetsClient = subnetclient.New(subnetClientConfig)
	az.RouteTablesClient = routetableclient.New(routeTableClientConfig)
	az.LoadBalancerClient = loadbalancerclient.New(loadBalancerClientConfig)
	az.SecurityGroupsClient = securitygroupclient.New(securityGroupClientConfig)
	az.PublicIPAddressesClient = publicipclient.New(publicIPClientConfig)
	az.FileClient = fileclient.New(fileClientConfig)
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getAzureClientConfig(servicePrincipalToken *adal.ServicePrincipalToken) *azclients.ClientConfig {
 | 
			
		||||
	azClientConfig := &azclients.ClientConfig{
 | 
			
		||||
		CloudName:               az.Config.Cloud,
 | 
			
		||||
		Location:                az.Config.Location,
 | 
			
		||||
		SubscriptionID:          az.Config.SubscriptionID,
 | 
			
		||||
		ResourceManagerEndpoint: az.Environment.ResourceManagerEndpoint,
 | 
			
		||||
		Authorizer:              autorest.NewBearerAuthorizer(servicePrincipalToken),
 | 
			
		||||
		Backoff:                 &retry.Backoff{Steps: 1},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.Config.CloudProviderBackoff {
 | 
			
		||||
		azClientConfig.Backoff = &retry.Backoff{
 | 
			
		||||
			Steps:    az.Config.CloudProviderBackoffRetries,
 | 
			
		||||
			Factor:   az.Config.CloudProviderBackoffExponent,
 | 
			
		||||
			Duration: time.Duration(az.Config.CloudProviderBackoffDuration) * time.Second,
 | 
			
		||||
			Jitter:   az.Config.CloudProviderBackoffJitter,
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return azClientConfig
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// parseConfig returns a parsed configuration for an Azure cloudprovider config file
 | 
			
		||||
func parseConfig(configReader io.Reader) (*Config, error) {
 | 
			
		||||
	var config Config
 | 
			
		||||
	if configReader == nil {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	configContents, err := ioutil.ReadAll(configReader)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	err = yaml.Unmarshal(configContents, &config)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// The resource group name may be in different cases from different Azure APIs, hence it is converted to lower here.
 | 
			
		||||
	// See more context at https://github.com/kubernetes/kubernetes/issues/71994.
 | 
			
		||||
	config.ResourceGroup = strings.ToLower(config.ResourceGroup)
 | 
			
		||||
	return &config, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Initialize passes a Kubernetes clientBuilder interface to the cloud provider
func (az *Cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
	az.KubeClient = clientBuilder.ClientOrDie("azure-cloud-provider")
	// Wire an event recorder so the provider can emit Kubernetes events.
	az.eventBroadcaster = record.NewBroadcaster()
	az.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: az.KubeClient.CoreV1().Events("")})
	az.eventRecorder = az.eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "azure-cloud-provider"})
	// Attempt lazy initialization from a cloud-config secret now that a
	// Kubernetes client is available.
	az.InitializeCloudFromSecret()
}
 | 
			
		||||
 | 
			
		||||
// LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	// Cloud itself implements cloudprovider.LoadBalancer.
	return az, true
}
 | 
			
		||||
 | 
			
		||||
// Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) Instances() (cloudprovider.Instances, bool) {
	// Cloud itself implements cloudprovider.Instances.
	return az, true
}
 | 
			
		||||
 | 
			
		||||
// InstancesV2 returns an instancesV2 interface. Also returns true if the interface is supported, false otherwise.
// TODO: implement ONLY for external cloud provider
func (az *Cloud) InstancesV2() (cloudprovider.InstancesV2, bool) {
	// The in-tree provider does not support the InstancesV2 API.
	return nil, false
}
 | 
			
		||||
 | 
			
		||||
// Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.
func (az *Cloud) Zones() (cloudprovider.Zones, bool) {
	// Cloud itself implements cloudprovider.Zones.
	return az, true
}
 | 
			
		||||
 | 
			
		||||
// Clusters returns a clusters interface.  Also returns true if the interface is supported, false otherwise.
func (az *Cloud) Clusters() (cloudprovider.Clusters, bool) {
	// Cluster management is not supported by the Azure provider.
	return nil, false
}
 | 
			
		||||
 | 
			
		||||
// Routes returns a routes interface along with whether the interface is supported.
func (az *Cloud) Routes() (cloudprovider.Routes, bool) {
	// Cloud itself implements cloudprovider.Routes.
	return az, true
}
 | 
			
		||||
 | 
			
		||||
// HasClusterID returns true if the cluster has a clusterID
func (az *Cloud) HasClusterID() bool {
	// Azure clusters are always considered to have a cluster ID.
	return true
}
 | 
			
		||||
 | 
			
		||||
// ProviderName returns the cloud provider ID.
func (az *Cloud) ProviderName() string {
	return CloudProviderName
}
 | 
			
		||||
 | 
			
		||||
func initDiskControllers(az *Cloud) error {
 | 
			
		||||
	// Common controller contains the function
 | 
			
		||||
	// needed by both blob disk and managed disk controllers
 | 
			
		||||
 | 
			
		||||
	common := &controllerCommon{
 | 
			
		||||
		location:              az.Location,
 | 
			
		||||
		storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
 | 
			
		||||
		resourceGroup:         az.ResourceGroup,
 | 
			
		||||
		subscriptionID:        az.SubscriptionID,
 | 
			
		||||
		cloud:                 az,
 | 
			
		||||
		vmLockMap:             newLockMap(),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az.BlobDiskController = &BlobDiskController{common: common}
 | 
			
		||||
	az.ManagedDiskController = &ManagedDiskController{common: common}
 | 
			
		||||
	az.controllerCommon = common
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SetInformers sets informers for Azure cloud provider.
//
// It registers add/update/delete handlers on the node informer that keep the
// provider's local node caches (names, zones, resource groups, unmanaged and
// LB-excluded nodes) in sync, and records the informer's HasSynced function
// so cache readers can verify the informer is ready.
func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) {
	klog.Infof("Setting up informers for Azure cloud provider")
	nodeInformer := informerFactory.Core().V1().Nodes().Informer()
	nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			node := obj.(*v1.Node)
			// New node: no previous state to remove.
			az.updateNodeCaches(nil, node)
		},
		UpdateFunc: func(prev, obj interface{}) {
			prevNode := prev.(*v1.Node)
			newNode := obj.(*v1.Node)
			az.updateNodeCaches(prevNode, newNode)
		},
		DeleteFunc: func(obj interface{}) {
			node, isNode := obj.(*v1.Node)
			// We can get DeletedFinalStateUnknown instead of *v1.Node here
			// and we need to handle that correctly.
			if !isNode {
				deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
				if !ok {
					klog.Errorf("Received unexpected object: %v", obj)
					return
				}
				node, ok = deletedState.Obj.(*v1.Node)
				if !ok {
					klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
					return
				}
			}
			// Deleted node: no new state to add.
			az.updateNodeCaches(node, nil)
		},
	})
	az.nodeInformerSynced = nodeInformer.HasSynced
}
 | 
			
		||||
 | 
			
		||||
// updateNodeCaches updates local cache for node's zones and external resource groups.
 | 
			
		||||
func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) {
 | 
			
		||||
	az.nodeCachesLock.Lock()
 | 
			
		||||
	defer az.nodeCachesLock.Unlock()
 | 
			
		||||
 | 
			
		||||
	if prevNode != nil {
 | 
			
		||||
 | 
			
		||||
		// Remove from nodeNames cache.
 | 
			
		||||
		az.nodeNames.Delete(prevNode.ObjectMeta.Name)
 | 
			
		||||
 | 
			
		||||
		// Remove from nodeZones cache
 | 
			
		||||
		prevZone, ok := prevNode.ObjectMeta.Labels[v1.LabelTopologyZone]
 | 
			
		||||
 | 
			
		||||
		if ok && az.isAvailabilityZone(prevZone) {
 | 
			
		||||
			az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
 | 
			
		||||
			if az.nodeZones[prevZone].Len() == 0 {
 | 
			
		||||
				az.nodeZones[prevZone] = nil
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Remove from nodeZones cache if using deprecated LabelFailureDomainBetaZone
 | 
			
		||||
		prevZoneFailureDomain, ok := prevNode.ObjectMeta.Labels[v1.LabelFailureDomainBetaZone]
 | 
			
		||||
		if ok && az.isAvailabilityZone(prevZoneFailureDomain) {
 | 
			
		||||
			az.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
 | 
			
		||||
			if az.nodeZones[prevZone].Len() == 0 {
 | 
			
		||||
				az.nodeZones[prevZone] = nil
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Remove from nodeResourceGroups cache.
 | 
			
		||||
		_, ok = prevNode.ObjectMeta.Labels[externalResourceGroupLabel]
 | 
			
		||||
		if ok {
 | 
			
		||||
			delete(az.nodeResourceGroups, prevNode.ObjectMeta.Name)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		managed, ok := prevNode.ObjectMeta.Labels[managedByAzureLabel]
 | 
			
		||||
		isNodeManagedByCloudProvider := !ok || managed != "false"
 | 
			
		||||
 | 
			
		||||
		// Remove from unmanagedNodes cache
 | 
			
		||||
		if !isNodeManagedByCloudProvider {
 | 
			
		||||
			az.unmanagedNodes.Delete(prevNode.ObjectMeta.Name)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// if the node is being deleted from the cluster, exclude it from load balancers
 | 
			
		||||
		if newNode == nil {
 | 
			
		||||
			az.excludeLoadBalancerNodes.Insert(prevNode.ObjectMeta.Name)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if newNode != nil {
 | 
			
		||||
		// Add to nodeNames cache.
 | 
			
		||||
		az.nodeNames.Insert(newNode.ObjectMeta.Name)
 | 
			
		||||
 | 
			
		||||
		// Add to nodeZones cache.
 | 
			
		||||
		newZone, ok := newNode.ObjectMeta.Labels[v1.LabelTopologyZone]
 | 
			
		||||
		if ok && az.isAvailabilityZone(newZone) {
 | 
			
		||||
			if az.nodeZones[newZone] == nil {
 | 
			
		||||
				az.nodeZones[newZone] = sets.NewString()
 | 
			
		||||
			}
 | 
			
		||||
			az.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Add to nodeResourceGroups cache.
 | 
			
		||||
		newRG, ok := newNode.ObjectMeta.Labels[externalResourceGroupLabel]
 | 
			
		||||
		if ok && len(newRG) > 0 {
 | 
			
		||||
			az.nodeResourceGroups[newNode.ObjectMeta.Name] = strings.ToLower(newRG)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		_, hasExcludeBalancerLabel := newNode.ObjectMeta.Labels[v1.LabelNodeExcludeBalancers]
 | 
			
		||||
		managed, ok := newNode.ObjectMeta.Labels[managedByAzureLabel]
 | 
			
		||||
		isNodeManagedByCloudProvider := !ok || managed != "false"
 | 
			
		||||
 | 
			
		||||
		// Update unmanagedNodes cache
 | 
			
		||||
		if !isNodeManagedByCloudProvider {
 | 
			
		||||
			az.unmanagedNodes.Insert(newNode.ObjectMeta.Name)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Update excludeLoadBalancerNodes cache
 | 
			
		||||
		switch {
 | 
			
		||||
		case !isNodeManagedByCloudProvider:
 | 
			
		||||
			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
 | 
			
		||||
 | 
			
		||||
		case hasExcludeBalancerLabel:
 | 
			
		||||
			az.excludeLoadBalancerNodes.Insert(newNode.ObjectMeta.Name)
 | 
			
		||||
 | 
			
		||||
		default:
 | 
			
		||||
			// Nodes not falling into the three cases above are valid backends and
 | 
			
		||||
			// should not appear in excludeLoadBalancerNodes cache.
 | 
			
		||||
			az.excludeLoadBalancerNodes.Delete(newNode.ObjectMeta.Name)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetActiveZones returns all the zones in which k8s nodes are currently running.
 | 
			
		||||
func (az *Cloud) GetActiveZones() (sets.String, error) {
 | 
			
		||||
	if az.nodeInformerSynced == nil {
 | 
			
		||||
		return nil, fmt.Errorf("azure cloud provider doesn't have informers set")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az.nodeCachesLock.RLock()
 | 
			
		||||
	defer az.nodeCachesLock.RUnlock()
 | 
			
		||||
	if !az.nodeInformerSynced() {
 | 
			
		||||
		return nil, fmt.Errorf("node informer is not synced when trying to GetActiveZones")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	zones := sets.NewString()
 | 
			
		||||
	for zone, nodes := range az.nodeZones {
 | 
			
		||||
		if len(nodes) > 0 {
 | 
			
		||||
			zones.Insert(zone)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return zones, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetLocation returns the location in which k8s cluster is currently running.
func (az *Cloud) GetLocation() string {
	// Location comes straight from the cloud-config.
	return az.Location
}
 | 
			
		||||
 | 
			
		||||
// GetNodeResourceGroup gets resource group for given node.
 | 
			
		||||
func (az *Cloud) GetNodeResourceGroup(nodeName string) (string, error) {
 | 
			
		||||
	// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
 | 
			
		||||
	if az.nodeInformerSynced == nil {
 | 
			
		||||
		return az.ResourceGroup, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az.nodeCachesLock.RLock()
 | 
			
		||||
	defer az.nodeCachesLock.RUnlock()
 | 
			
		||||
	if !az.nodeInformerSynced() {
 | 
			
		||||
		return "", fmt.Errorf("node informer is not synced when trying to GetNodeResourceGroup")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Return external resource group if it has been cached.
 | 
			
		||||
	if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok {
 | 
			
		||||
		return cachedRG, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Return resource group from cloud provider options.
 | 
			
		||||
	return az.ResourceGroup, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetNodeNames returns a set of all node names in the k8s cluster.
 | 
			
		||||
func (az *Cloud) GetNodeNames() (sets.String, error) {
 | 
			
		||||
	// Kubelet won't set az.nodeInformerSynced, return nil.
 | 
			
		||||
	if az.nodeInformerSynced == nil {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az.nodeCachesLock.RLock()
 | 
			
		||||
	defer az.nodeCachesLock.RUnlock()
 | 
			
		||||
	if !az.nodeInformerSynced() {
 | 
			
		||||
		return nil, fmt.Errorf("node informer is not synced when trying to GetNodeNames")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return sets.NewString(az.nodeNames.List()...), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetResourceGroups returns a set of resource groups that all nodes are running on.
 | 
			
		||||
func (az *Cloud) GetResourceGroups() (sets.String, error) {
 | 
			
		||||
	// Kubelet won't set az.nodeInformerSynced, always return configured resourceGroup.
 | 
			
		||||
	if az.nodeInformerSynced == nil {
 | 
			
		||||
		return sets.NewString(az.ResourceGroup), nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az.nodeCachesLock.RLock()
 | 
			
		||||
	defer az.nodeCachesLock.RUnlock()
 | 
			
		||||
	if !az.nodeInformerSynced() {
 | 
			
		||||
		return nil, fmt.Errorf("node informer is not synced when trying to GetResourceGroups")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	resourceGroups := sets.NewString(az.ResourceGroup)
 | 
			
		||||
	for _, rg := range az.nodeResourceGroups {
 | 
			
		||||
		resourceGroups.Insert(rg)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return resourceGroups, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetUnmanagedNodes returns a list of nodes not managed by Azure cloud provider (e.g. on-prem nodes).
 | 
			
		||||
func (az *Cloud) GetUnmanagedNodes() (sets.String, error) {
 | 
			
		||||
	// Kubelet won't set az.nodeInformerSynced, always return nil.
 | 
			
		||||
	if az.nodeInformerSynced == nil {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az.nodeCachesLock.RLock()
 | 
			
		||||
	defer az.nodeCachesLock.RUnlock()
 | 
			
		||||
	if !az.nodeInformerSynced() {
 | 
			
		||||
		return nil, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return sets.NewString(az.unmanagedNodes.List()...), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ShouldNodeExcludedFromLoadBalancer returns true if node is unmanaged, in external resource group or labeled with "node.kubernetes.io/exclude-from-external-load-balancers".
func (az *Cloud) ShouldNodeExcludedFromLoadBalancer(nodeName string) (bool, error) {
	// Kubelet won't set az.nodeInformerSynced, always return nil.
	if az.nodeInformerSynced == nil {
		return false, nil
	}

	// Read the node caches under the shared lock; the informer-synced check
	// must happen while holding it so we never read a half-populated cache.
	az.nodeCachesLock.RLock()
	defer az.nodeCachesLock.RUnlock()
	if !az.nodeInformerSynced() {
		return false, fmt.Errorf("node informer is not synced when trying to fetch node caches")
	}

	// Return true if the node is in external resource group.
	if cachedRG, ok := az.nodeResourceGroups[nodeName]; ok && !strings.EqualFold(cachedRG, az.ResourceGroup) {
		return true, nil
	}

	// Otherwise defer to the pre-computed exclusion set (unmanaged or
	// exclude-labeled nodes).
	return az.excludeLoadBalancerNodes.Has(nodeName), nil
}
 | 
			
		||||
@@ -1,470 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"regexp"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/runtime"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
	// vmssVMNotActiveErrorMessage appears in Azure errors when the instance is
	// under deletion from a VMSS; "not active" means it no longer accepts operations.
	vmssVMNotActiveErrorMessage = "not an active Virtual Machine Scale Set VM instanceId"

	// operationCanceledErrorMessage means the operation is canceled by another new operation.
	// Matched case-insensitively against the raw error text below.
	operationCanceledErrorMessage = "canceledandsupersededduetoanotheroperation"

	// cannotDeletePublicIPErrorMessageCode is the error code seen when a public IP
	// cannot be deleted because other resources still reference it.
	cannotDeletePublicIPErrorMessageCode = "PublicIPAddressCannotBeDeleted"

	// referencedResourceNotProvisionedMessageCode is the error code seen when a
	// referenced resource (e.g. a public IP) is not in the Succeeded provisioning state.
	referencedResourceNotProvisionedMessageCode = "ReferencedResourceNotProvisioned"
)
 | 
			
		||||
 | 
			
		||||
var (
	// pipErrorMessageRE extracts the resource group (submatch 1) and public IP
	// name (submatch 2) from an Azure error message embedding a publicIPAddresses
	// resource ID; used by CreateOrUpdateLB to repair not-provisioned PIPs.
	pipErrorMessageRE = regexp.MustCompile(`(?:.*)/subscriptions/(?:.*)/resourceGroups/(.*)/providers/Microsoft.Network/publicIPAddresses/([^\s]+)(?:.*)`)
)
 | 
			
		||||
 | 
			
		||||
// RequestBackoff if backoff is disabled in cloud provider it
 | 
			
		||||
// returns a new Backoff object steps = 1
 | 
			
		||||
// This is to make sure that the requested command executes
 | 
			
		||||
// at least once
 | 
			
		||||
func (az *Cloud) RequestBackoff() (resourceRequestBackoff wait.Backoff) {
 | 
			
		||||
	if az.CloudProviderBackoff {
 | 
			
		||||
		return az.ResourceRequestBackoff
 | 
			
		||||
	}
 | 
			
		||||
	resourceRequestBackoff = wait.Backoff{
 | 
			
		||||
		Steps: 1,
 | 
			
		||||
	}
 | 
			
		||||
	return resourceRequestBackoff
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Event creates a event for the specified object.
 | 
			
		||||
func (az *Cloud) Event(obj runtime.Object, eventType, reason, message string) {
 | 
			
		||||
	if obj != nil && reason != "" {
 | 
			
		||||
		az.eventRecorder.Event(obj, eventType, reason, message)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry
func (az *Cloud) GetVirtualMachineWithRetry(name types.NodeName, crt azcache.AzureCacheReadType) (compute.VirtualMachine, error) {
	var machine compute.VirtualMachine
	var retryErr error
	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
		machine, retryErr = az.getVirtualMachine(name, crt)
		if retryErr == cloudprovider.InstanceNotFound {
			// Not retriable: the instance does not exist on Azure, so stop
			// immediately and surface InstanceNotFound to the caller.
			return true, cloudprovider.InstanceNotFound
		}
		if retryErr != nil {
			klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr)
			return false, nil
		}
		klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name)
		return true, nil
	})
	if err == wait.ErrWaitTimeout {
		// Retries were exhausted; return the last underlying error instead of
		// the generic backoff-timeout error.
		err = retryErr
	}
	return machine, err
}
 | 
			
		||||
 | 
			
		||||
// ListVirtualMachines invokes az.VirtualMachinesClient.List with exponential backoff retry
 | 
			
		||||
func (az *Cloud) ListVirtualMachines(resourceGroup string) ([]compute.VirtualMachine, error) {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	allNodes, rerr := az.VirtualMachinesClient.List(ctx, resourceGroup)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.Errorf("VirtualMachinesClient.List(%v) failure with err=%v", resourceGroup, rerr)
 | 
			
		||||
		return nil, rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(2).Infof("VirtualMachinesClient.List(%v) success", resourceGroup)
 | 
			
		||||
	return allNodes, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getPrivateIPsForMachine is wrapper for optional backoff getting private ips
// list of a node by name
// Currently it always delegates to the retrying variant.
func (az *Cloud) getPrivateIPsForMachine(nodeName types.NodeName) ([]string, error) {
	return az.getPrivateIPsForMachineWithRetry(nodeName)
}
 | 
			
		||||
 | 
			
		||||
// getPrivateIPsForMachineWithRetry fetches the node's private IPs from the
// configured VMSet, retrying with the cloud's exponential backoff. An
// InstanceNotFound error aborts the retries immediately.
func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]string, error) {
	var privateIPs []string
	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
		var retryErr error
		privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName))
		if retryErr != nil {
			// won't retry since the instance doesn't exist on Azure.
			if retryErr == cloudprovider.InstanceNotFound {
				return true, retryErr
			}
			klog.Errorf("GetPrivateIPsByNodeName(%s): backoff failure, will retry,err=%v", nodeName, retryErr)
			return false, nil
		}
		klog.V(3).Infof("GetPrivateIPsByNodeName(%s): backoff success", nodeName)
		return true, nil
	})
	// NOTE: on exhausted retries the generic wait.ErrWaitTimeout is returned
	// (not the last underlying error, unlike GetVirtualMachineWithRetry).
	return privateIPs, err
}
 | 
			
		||||
 | 
			
		||||
// getIPForMachine returns the node's (private IP, public IP) pair.
// Currently it always delegates to the retrying variant.
func (az *Cloud) getIPForMachine(nodeName types.NodeName) (string, string, error) {
	return az.GetIPForMachineWithRetry(nodeName)
}
 | 
			
		||||
 | 
			
		||||
// GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry
// and returns the node's (private IP, public IP) pair.
func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, error) {
	var ip, publicIP string
	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
		var retryErr error
		ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name))
		if retryErr != nil {
			// Unlike getPrivateIPsForMachineWithRetry, every error is retried
			// here — there is no InstanceNotFound short-circuit.
			klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr)
			return false, nil
		}
		klog.V(3).Infof("GetIPForMachineWithRetry(%s): backoff success", name)
		return true, nil
	})
	return ip, publicIP, err
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdateSecurityGroup invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateSecurityGroup(sg network.SecurityGroup) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	// The etag is passed for optimistic concurrency: a stale etag yields
	// StatusPreconditionFailed, handled below.
	rerr := az.SecurityGroupsClient.CreateOrUpdate(ctx, az.SecurityGroupResourceGroup, *sg.Name, sg, pointer.StringDeref(sg.Etag, ""))
	klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
	if rerr == nil {
		// Invalidate the cache right after updating
		az.nsgCache.Delete(*sg.Name)
		return nil
	}

	// Invalidate the cache because ETAG precondition mismatch.
	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
		klog.V(3).Infof("SecurityGroup cache for %s is cleanup because of http.StatusPreconditionFailed", *sg.Name)
		az.nsgCache.Delete(*sg.Name)
	}

	// Invalidate the cache because another new operation has canceled the current request.
	if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
		klog.V(3).Infof("SecurityGroup cache for %s is cleanup because CreateOrUpdateSecurityGroup is canceled by another operation", *sg.Name)
		az.nsgCache.Delete(*sg.Name)
	}

	return rerr.Error()
}
 | 
			
		||||
 | 
			
		||||
func cleanupSubnetInFrontendIPConfigurations(lb *network.LoadBalancer) network.LoadBalancer {
 | 
			
		||||
	if lb.LoadBalancerPropertiesFormat == nil || lb.FrontendIPConfigurations == nil {
 | 
			
		||||
		return *lb
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	frontendIPConfigurations := *lb.FrontendIPConfigurations
 | 
			
		||||
	for i := range frontendIPConfigurations {
 | 
			
		||||
		config := frontendIPConfigurations[i]
 | 
			
		||||
		if config.FrontendIPConfigurationPropertiesFormat != nil &&
 | 
			
		||||
			config.Subnet != nil &&
 | 
			
		||||
			config.Subnet.ID != nil {
 | 
			
		||||
			subnet := network.Subnet{
 | 
			
		||||
				ID: config.Subnet.ID,
 | 
			
		||||
			}
 | 
			
		||||
			if config.Subnet.Name != nil {
 | 
			
		||||
				subnet.Name = config.FrontendIPConfigurationPropertiesFormat.Subnet.Name
 | 
			
		||||
			}
 | 
			
		||||
			config.FrontendIPConfigurationPropertiesFormat.Subnet = &subnet
 | 
			
		||||
			frontendIPConfigurations[i] = config
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	lb.FrontendIPConfigurations = &frontendIPConfigurations
 | 
			
		||||
	return *lb
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdateLB invokes az.LoadBalancerClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateLB(service *v1.Service, lb network.LoadBalancer) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	// Strip subnets in the frontend IP configurations down to ID/name before
	// sending the payload to ARM.
	lb = cleanupSubnetInFrontendIPConfigurations(&lb)

	rgName := az.getLoadBalancerResourceGroup()
	rerr := az.LoadBalancerClient.CreateOrUpdate(ctx, rgName, pointer.StringDeref(lb.Name, ""), lb, pointer.StringDeref(lb.Etag, ""))
	klog.V(10).Infof("LoadBalancerClient.CreateOrUpdate(%s): end", *lb.Name)
	if rerr == nil {
		// Invalidate the cache right after updating
		az.lbCache.Delete(*lb.Name)
		return nil
	}

	// Invalidate the cache because ETAG precondition mismatch.
	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", pointer.StringDeref(lb.Name, ""))
		az.lbCache.Delete(*lb.Name)
	}

	retryErrorMessage := rerr.Error().Error()
	// Invalidate the cache because another new operation has canceled the current request.
	if strings.Contains(strings.ToLower(retryErrorMessage), operationCanceledErrorMessage) {
		klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", pointer.StringDeref(lb.Name, ""))
		az.lbCache.Delete(*lb.Name)
	}

	// The LB update may fail because the referenced PIP is not in the Succeeded provisioning state
	if strings.Contains(strings.ToLower(retryErrorMessage), strings.ToLower(referencedResourceNotProvisionedMessageCode)) {
		// Parse the offending PIP's resource group and name out of the error text.
		matches := pipErrorMessageRE.FindStringSubmatch(retryErrorMessage)
		if len(matches) != 3 {
			klog.Warningf("Failed to parse the retry error message %s", retryErrorMessage)
			return rerr.Error()
		}
		pipRG, pipName := matches[1], matches[2]
		klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, pointer.StringDeref(lb.Name, ""))
		pip, _, err := az.getPublicIPAddress(pipRG, pipName)
		if err != nil {
			klog.Warningf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err)
			return rerr.Error()
		}
		// Perform a dummy update to fix the provisioning state
		err = az.CreateOrUpdatePIP(service, pipRG, pip)
		if err != nil {
			klog.Warningf("Failed to update the public IP %s in resource group %s: %v", pipName, pipRG, err)
			return rerr.Error()
		}
		// Invalidate the LB cache, return the error, and the controller manager
		// would retry the LB update in the next reconcile loop
		az.lbCache.Delete(*lb.Name)
	}

	return rerr.Error()
}
 | 
			
		||||
 | 
			
		||||
// ListLB invokes az.LoadBalancerClient.List with exponential backoff retry
 | 
			
		||||
func (az *Cloud) ListLB(service *v1.Service) ([]network.LoadBalancer, error) {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	rgName := az.getLoadBalancerResourceGroup()
 | 
			
		||||
	allLBs, rerr := az.LoadBalancerClient.List(ctx, rgName)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsNotFound() {
 | 
			
		||||
			return nil, nil
 | 
			
		||||
		}
 | 
			
		||||
		az.Event(service, v1.EventTypeWarning, "ListLoadBalancers", rerr.Error().Error())
 | 
			
		||||
		klog.Errorf("LoadBalancerClient.List(%v) failure with err=%v", rgName, rerr)
 | 
			
		||||
		return nil, rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(2).Infof("LoadBalancerClient.List(%v) success", rgName)
 | 
			
		||||
	return allLBs, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ListPIP list the PIP resources in the given resource group
 | 
			
		||||
func (az *Cloud) ListPIP(service *v1.Service, pipResourceGroup string) ([]network.PublicIPAddress, error) {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	allPIPs, rerr := az.PublicIPAddressesClient.List(ctx, pipResourceGroup)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsNotFound() {
 | 
			
		||||
			return nil, nil
 | 
			
		||||
		}
 | 
			
		||||
		az.Event(service, v1.EventTypeWarning, "ListPublicIPs", rerr.Error().Error())
 | 
			
		||||
		klog.Errorf("PublicIPAddressesClient.List(%v) failure with err=%v", pipResourceGroup, rerr)
 | 
			
		||||
		return nil, rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("PublicIPAddressesClient.List(%v) success", pipResourceGroup)
 | 
			
		||||
	return allPIPs, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdatePIP invokes az.PublicIPAddressesClient.CreateOrUpdate with exponential backoff retry
 | 
			
		||||
func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, pip network.PublicIPAddress) error {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	rerr := az.PublicIPAddressesClient.CreateOrUpdate(ctx, pipResourceGroup, pointer.StringDeref(pip.Name, ""), pip)
 | 
			
		||||
	klog.V(10).Infof("PublicIPAddressesClient.CreateOrUpdate(%s, %s): end", pipResourceGroup, pointer.StringDeref(pip.Name, ""))
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.Errorf("PublicIPAddressesClient.CreateOrUpdate(%s, %s) failed: %s", pipResourceGroup, pointer.StringDeref(pip.Name, ""), rerr.Error().Error())
 | 
			
		||||
		az.Event(service, v1.EventTypeWarning, "CreateOrUpdatePublicIPAddress", rerr.Error().Error())
 | 
			
		||||
		return rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdateInterface invokes az.InterfacesClient.CreateOrUpdate with exponential backoff retry
// (the previous comment incorrectly said PublicIPAddressesClient).
func (az *Cloud) CreateOrUpdateInterface(service *v1.Service, nic network.Interface) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	rerr := az.InterfacesClient.CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, nic)
	klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name)
	if rerr != nil {
		// Record the failure on the owning service and surface the error.
		klog.Errorf("InterfacesClient.CreateOrUpdate(%s) failed: %s", *nic.Name, rerr.Error().Error())
		az.Event(service, v1.EventTypeWarning, "CreateOrUpdateInterface", rerr.Error().Error())
		return rerr.Error()
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// DeletePublicIP invokes az.PublicIPAddressesClient.Delete with exponential backoff retry
func (az *Cloud) DeletePublicIP(service *v1.Service, pipResourceGroup string, pipName string) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	rerr := az.PublicIPAddressesClient.Delete(ctx, pipResourceGroup, pipName)
	if rerr != nil {
		klog.Errorf("PublicIPAddressesClient.Delete(%s) failed: %s", pipName, rerr.Error().Error())
		az.Event(service, v1.EventTypeWarning, "DeletePublicIPAddress", rerr.Error().Error())

		// Deliberately swallow the "still referenced" error: the PIP cannot be
		// deleted while other resources point at it, but the service deletion
		// should continue regardless.
		if strings.Contains(rerr.Error().Error(), cannotDeletePublicIPErrorMessageCode) {
			klog.Warningf("DeletePublicIP for public IP %s failed with error %v, this is because other resources are referencing the public IP. The deletion of the service will continue.", pipName, rerr.Error())
			return nil
		}
		return rerr.Error()
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// DeleteLB invokes az.LoadBalancerClient.Delete with exponential backoff retry
 | 
			
		||||
func (az *Cloud) DeleteLB(service *v1.Service, lbName string) error {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	rgName := az.getLoadBalancerResourceGroup()
 | 
			
		||||
	rerr := az.LoadBalancerClient.Delete(ctx, rgName, lbName)
 | 
			
		||||
	if rerr == nil {
 | 
			
		||||
		// Invalidate the cache right after updating
 | 
			
		||||
		az.lbCache.Delete(lbName)
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.Errorf("LoadBalancerClient.Delete(%s) failed: %s", lbName, rerr.Error().Error())
 | 
			
		||||
	az.Event(service, v1.EventTypeWarning, "DeleteLoadBalancer", rerr.Error().Error())
 | 
			
		||||
	return rerr.Error()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdateRouteTable invokes az.RouteTablesClient.CreateOrUpdate with exponential backoff retry
func (az *Cloud) CreateOrUpdateRouteTable(routeTable network.RouteTable) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	// The etag is passed for optimistic concurrency; a stale etag yields
	// StatusPreconditionFailed, handled below.
	rerr := az.RouteTablesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeTable, pointer.StringDeref(routeTable.Etag, ""))
	if rerr == nil {
		// Invalidate the cache right after updating
		az.rtCache.Delete(*routeTable.Name)
		return nil
	}

	// Invalidate the cache because etag mismatch.
	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
		klog.V(3).Infof("Route table cache for %s is cleanup because of http.StatusPreconditionFailed", *routeTable.Name)
		az.rtCache.Delete(*routeTable.Name)
	}
	// Invalidate the cache because another new operation has canceled the current request.
	if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
		klog.V(3).Infof("Route table cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *routeTable.Name)
		az.rtCache.Delete(*routeTable.Name)
	}
	klog.Errorf("RouteTablesClient.CreateOrUpdate(%s) failed: %v", az.RouteTableName, rerr.Error())
	return rerr.Error()
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdateRoute invokes az.RoutesClient.CreateOrUpdate with exponential backoff retry
 | 
			
		||||
func (az *Cloud) CreateOrUpdateRoute(route network.Route) error {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	rerr := az.RoutesClient.CreateOrUpdate(ctx, az.RouteTableResourceGroup, az.RouteTableName, *route.Name, route, pointer.StringDeref(route.Etag, ""))
 | 
			
		||||
	klog.V(10).Infof("RoutesClient.CreateOrUpdate(%s): end", *route.Name)
 | 
			
		||||
	if rerr == nil {
 | 
			
		||||
		az.rtCache.Delete(az.RouteTableName)
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if rerr.HTTPStatusCode == http.StatusPreconditionFailed {
 | 
			
		||||
		klog.V(3).Infof("Route cache for %s is cleanup because of http.StatusPreconditionFailed", *route.Name)
 | 
			
		||||
		az.rtCache.Delete(az.RouteTableName)
 | 
			
		||||
	}
 | 
			
		||||
	// Invalidate the cache because another new operation has canceled the current request.
 | 
			
		||||
	if strings.Contains(strings.ToLower(rerr.Error().Error()), operationCanceledErrorMessage) {
 | 
			
		||||
		klog.V(3).Infof("Route cache for %s is cleanup because CreateOrUpdateRouteTable is canceled by another operation", *route.Name)
 | 
			
		||||
		az.rtCache.Delete(az.RouteTableName)
 | 
			
		||||
	}
 | 
			
		||||
	return rerr.Error()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeleteRouteWithName invokes az.RoutesClient.Delete with exponential backoff retry
// (the previous comment incorrectly said CreateOrUpdate). The route is removed
// from the configured route table az.RouteTableName in az.RouteTableResourceGroup.
func (az *Cloud) DeleteRouteWithName(routeName string) error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	rerr := az.RoutesClient.Delete(ctx, az.RouteTableResourceGroup, az.RouteTableName, routeName)
	klog.V(10).Infof("RoutesClient.Delete(%s,%s): end", az.RouteTableName, routeName)
	if rerr == nil {
		return nil
	}

	klog.Errorf("RoutesClient.Delete(%s, %s) failed: %v", az.RouteTableName, routeName, rerr.Error())
	return rerr.Error()
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdateVMSS invokes az.VirtualMachineScaleSetsClient.Update().
func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName string, parameters compute.VirtualMachineScaleSet) *retry.Error {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	// When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error.
	// Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
	klog.V(3).Infof("CreateOrUpdateVMSS: verify the status of the vmss being created or updated")
	vmss, rerr := az.VirtualMachineScaleSetsClient.Get(ctx, resourceGroupName, VMScaleSetName)
	if rerr != nil {
		klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, rerr)
		return rerr
	}
	// NOTE(review): the guard compares against virtualMachineScaleSetsDeallocating
	// while the log speaks of deletion — confirm the intended provisioning state.
	if vmss.ProvisioningState != nil && strings.EqualFold(*vmss.ProvisioningState, virtualMachineScaleSetsDeallocating) {
		klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName)
		return nil
	}

	rerr = az.VirtualMachineScaleSetsClient.CreateOrUpdate(ctx, resourceGroupName, VMScaleSetName, parameters)
	klog.V(10).Infof("UpdateVmssVMWithRetry: VirtualMachineScaleSetsClient.CreateOrUpdate(%s): end", VMScaleSetName)
	if rerr != nil {
		klog.Errorf("CreateOrUpdateVMSS: error CreateOrUpdate vmss(%s): %v", VMScaleSetName, rerr)
		return rerr
	}

	return nil
}
 | 
			
		||||
@@ -1,563 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient/mockloadbalancerclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/publicipclient/mockpublicipclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/routeclient/mockrouteclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/routetableclient/mockroutetableclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient/mocksecuritygroupclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestGetVirtualMachineWithRetry checks that a 404 from the VM client is mapped
// to cloudprovider.InstanceNotFound (no retry) and that other errors are
// surfaced as the underlying retry error.
func TestGetVirtualMachineWithRetry(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	tests := []struct {
		vmClientErr *retry.Error
		expectedErr error
	}{
		{
			vmClientErr: &retry.Error{HTTPStatusCode: http.StatusNotFound},
			expectedErr: cloudprovider.InstanceNotFound,
		},
		{
			vmClientErr: &retry.Error{HTTPStatusCode: http.StatusInternalServerError},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)),
		},
	}

	for _, test := range tests {
		az := GetTestCloud(ctrl)
		mockVMClient := az.VirtualMachinesClient.(*mockvmclient.MockInterface)
		// Exactly one Get call is expected: both cases terminate the backoff loop.
		mockVMClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "vm", gomock.Any()).Return(compute.VirtualMachine{}, test.vmClientErr)

		vm, err := az.GetVirtualMachineWithRetry("vm", cache.CacheReadTypeDefault)
		assert.Empty(t, vm)
		assert.Equal(t, test.expectedErr, err)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestGetPrivateIPsForMachine verifies getPrivateIPsForMachine: on success it
// returns the private IPs of the node's primary NIC; a 404 from the VM client
// maps to cloudprovider.InstanceNotFound, and a 500 surfaces as
// wait.ErrWaitTimeout (the retry loop exhausts its budget).
func TestGetPrivateIPsForMachine(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	tests := []struct {
		vmClientErr        *retry.Error
		expectedPrivateIPs []string
		expectedErr        error
	}{
		// Happy path: VM resolved, NIC's private IP returned.
		{
			expectedPrivateIPs: []string{"1.2.3.4"},
		},
		// VM not found: translated to InstanceNotFound.
		{
			vmClientErr:        &retry.Error{HTTPStatusCode: http.StatusNotFound},
			expectedErr:        cloudprovider.InstanceNotFound,
			expectedPrivateIPs: []string{},
		},
		// Server error: the retry wrapper gives up with ErrWaitTimeout.
		{
			vmClientErr:        &retry.Error{HTTPStatusCode: http.StatusInternalServerError},
			expectedErr:        wait.ErrWaitTimeout,
			expectedPrivateIPs: []string{},
		},
	}

	// VM fixture whose primary NIC reference points at the "nic" interface below.
	expectedVM := compute.VirtualMachine{
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			AvailabilitySet: &compute.SubResource{ID: pointer.String("availability-set")},
			NetworkProfile: &compute.NetworkProfile{
				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
					{
						NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
							Primary: pointer.Bool(true),
						},
						ID: pointer.String("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic"),
					},
				},
			},
		},
	}

	// NIC fixture carrying the single private IP asserted in the happy path.
	expectedInterface := network.Interface{
		InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
			IPConfigurations: &[]network.InterfaceIPConfiguration{
				{
					InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
						PrivateIPAddress: pointer.String("1.2.3.4"),
					},
				},
			},
		},
	}

	for _, test := range tests {
		az := GetTestCloud(ctrl)
		mockVMClient := az.VirtualMachinesClient.(*mockvmclient.MockInterface)
		mockVMClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "vm", gomock.Any()).Return(expectedVM, test.vmClientErr)

		// MaxTimes(1): the NIC is only fetched when the VM lookup succeeds.
		mockInterfaceClient := az.InterfacesClient.(*mockinterfaceclient.MockInterface)
		mockInterfaceClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "nic", gomock.Any()).Return(expectedInterface, nil).MaxTimes(1)

		privateIPs, err := az.getPrivateIPsForMachine("vm")
		assert.Equal(t, test.expectedErr, err)
		assert.Equal(t, test.expectedPrivateIPs, privateIPs)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestGetIPForMachineWithRetry verifies GetIPForMachineWithRetry: on success
// it returns the node's private IP (from the primary NIC) and public IP (from
// the PIP the NIC references); when the VM client errors, the retry loop
// exhausts and the call fails with wait.ErrWaitTimeout.
func TestGetIPForMachineWithRetry(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	tests := []struct {
		clientErr         *retry.Error
		expectedPrivateIP string
		expectedPublicIP  string
		expectedErr       error
	}{
		// Happy path: both IPs resolved from the fixtures below.
		{
			expectedPrivateIP: "1.2.3.4",
			expectedPublicIP:  "5.6.7.8",
		},
		// Even a 404 surfaces as ErrWaitTimeout from this retry wrapper.
		{
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusNotFound},
			expectedErr: wait.ErrWaitTimeout,
		},
	}

	// VM fixture whose primary NIC reference points at the "nic" interface below.
	expectedVM := compute.VirtualMachine{
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			AvailabilitySet: &compute.SubResource{ID: pointer.String("availability-set")},
			NetworkProfile: &compute.NetworkProfile{
				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
					{
						NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
							Primary: pointer.Bool(true),
						},
						ID: pointer.String("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic"),
					},
				},
			},
		},
	}

	// NIC fixture: private IP plus a reference to the "pip" public IP below.
	expectedInterface := network.Interface{
		InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
			IPConfigurations: &[]network.InterfaceIPConfiguration{
				{
					InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
						PrivateIPAddress: pointer.String("1.2.3.4"),
						PublicIPAddress: &network.PublicIPAddress{
							ID: pointer.String("test/pip"),
						},
					},
				},
			},
		},
	}

	// Public IP fixture resolved from the NIC's PublicIPAddress reference.
	expectedPIP := network.PublicIPAddress{
		PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
			IPAddress: pointer.String("5.6.7.8"),
		},
	}

	for _, test := range tests {
		az := GetTestCloud(ctrl)
		mockVMClient := az.VirtualMachinesClient.(*mockvmclient.MockInterface)
		mockVMClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "vm", gomock.Any()).Return(expectedVM, test.clientErr)

		// MaxTimes(1): the NIC and PIP are only fetched when the VM lookup succeeds.
		mockInterfaceClient := az.InterfacesClient.(*mockinterfaceclient.MockInterface)
		mockInterfaceClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "nic", gomock.Any()).Return(expectedInterface, nil).MaxTimes(1)

		mockPIPClient := az.PublicIPAddressesClient.(*mockpublicipclient.MockInterface)
		mockPIPClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "pip", gomock.Any()).Return(expectedPIP, nil).MaxTimes(1)

		privateIP, publicIP, err := az.GetIPForMachineWithRetry("vm")
		assert.Equal(t, test.expectedErr, err)
		assert.Equal(t, test.expectedPrivateIP, privateIP)
		assert.Equal(t, test.expectedPublicIP, publicIP)
	}
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateSecurityGroupCanceled(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	az.nsgCache.Set("sg", "test")
 | 
			
		||||
 | 
			
		||||
	mockSGClient := az.SecurityGroupsClient.(*mocksecuritygroupclient.MockInterface)
 | 
			
		||||
	mockSGClient.EXPECT().CreateOrUpdate(gomock.Any(), az.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(&retry.Error{
 | 
			
		||||
		RawError: fmt.Errorf(operationCanceledErrorMessage),
 | 
			
		||||
	})
 | 
			
		||||
	mockSGClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "sg", gomock.Any()).Return(network.SecurityGroup{}, nil)
 | 
			
		||||
 | 
			
		||||
	err := az.CreateOrUpdateSecurityGroup(network.SecurityGroup{Name: pointer.String("sg")})
 | 
			
		||||
	assert.EqualError(t, fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("canceledandsupersededduetoanotheroperation")), err.Error())
 | 
			
		||||
 | 
			
		||||
	// security group should be removed from cache if the operation is canceled
 | 
			
		||||
	shouldBeEmpty, err := az.nsgCache.Get("sg", cache.CacheReadTypeDefault)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Empty(t, shouldBeEmpty)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestCreateOrUpdateLB verifies the error paths of CreateOrUpdateLB: etag
// mismatch (412), operation-canceled, and ReferencedResourceNotProvisioned
// all propagate the retry error to the caller, and in every case the load
// balancer entry is evicted from the lb cache.
func TestCreateOrUpdateLB(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	referencedResourceNotProvisionedRawErrorString := `Code="ReferencedResourceNotProvisioned" Message="Cannot proceed with operation because resource /subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/pip used by resource /subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb is not in Succeeded state. Resource is in Failed state and the last operation that updated/is updating the resource is PutPublicIpAddressOperation."`

	tests := []struct {
		clientErr   *retry.Error
		expectedErr error
	}{
		// Etag precondition failure (412).
		{
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusPreconditionFailed},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 412, RawError: %w", error(nil)),
		},
		// Operation canceled and superseded by another operation.
		{
			clientErr:   &retry.Error{RawError: fmt.Errorf("canceledandsupersededduetoanotheroperation")},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("canceledandsupersededduetoanotheroperation")),
		},
		// Referenced PIP not in Succeeded state; triggers the PIP repair path below.
		{
			clientErr:   &retry.Error{RawError: fmt.Errorf(referencedResourceNotProvisionedRawErrorString)},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf(referencedResourceNotProvisionedRawErrorString)),
		},
	}

	for _, test := range tests {
		az := GetTestCloud(ctrl)
		az.lbCache.Set("lb", "test")

		mockLBClient := az.LoadBalancerClient.(*mockloadbalancerclient.MockInterface)
		mockLBClient.EXPECT().CreateOrUpdate(gomock.Any(), az.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(test.clientErr)
		// Cache invalidation re-reads the LB once after the failure.
		mockLBClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "lb", gomock.Any()).Return(network.LoadBalancer{}, nil)

		// AnyTimes(): only the ReferencedResourceNotProvisioned case touches
		// the PIP client, so these expectations are optional.
		mockPIPClient := az.PublicIPAddressesClient.(*mockpublicipclient.MockInterface)
		mockPIPClient.EXPECT().CreateOrUpdate(gomock.Any(), az.ResourceGroup, "pip", gomock.Any()).Return(nil).AnyTimes()
		mockPIPClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "pip", gomock.Any()).Return(network.PublicIPAddress{
			Name: pointer.String("pip"),
			PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
				ProvisioningState: pointer.String("Succeeded"),
			},
		}, nil).AnyTimes()

		err := az.CreateOrUpdateLB(&v1.Service{}, network.LoadBalancer{
			Name: pointer.String("lb"),
			Etag: pointer.String("etag"),
		})
		assert.Equal(t, test.expectedErr, err)

		// loadbalancer should be removed from cache if the etag is mismatch or the operation is canceled
		shouldBeEmpty, err := az.lbCache.Get("lb", cache.CacheReadTypeDefault)
		assert.NoError(t, err)
		assert.Empty(t, shouldBeEmpty)
	}
}
 | 
			
		||||
 | 
			
		||||
func TestListLB(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		clientErr   *retry.Error
 | 
			
		||||
		expectedErr error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusInternalServerError},
 | 
			
		||||
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusNotFound},
 | 
			
		||||
			expectedErr: nil,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		az := GetTestCloud(ctrl)
 | 
			
		||||
		mockLBClient := az.LoadBalancerClient.(*mockloadbalancerclient.MockInterface)
 | 
			
		||||
		mockLBClient.EXPECT().List(gomock.Any(), az.ResourceGroup).Return(nil, test.clientErr)
 | 
			
		||||
 | 
			
		||||
		pips, err := az.ListLB(&v1.Service{})
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err)
 | 
			
		||||
		assert.Empty(t, pips)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListPIP(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		clientErr   *retry.Error
 | 
			
		||||
		expectedErr error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusInternalServerError},
 | 
			
		||||
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusNotFound},
 | 
			
		||||
			expectedErr: nil,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		az := GetTestCloud(ctrl)
 | 
			
		||||
		mockPIPClient := az.PublicIPAddressesClient.(*mockpublicipclient.MockInterface)
 | 
			
		||||
		mockPIPClient.EXPECT().List(gomock.Any(), az.ResourceGroup).Return(nil, test.clientErr)
 | 
			
		||||
 | 
			
		||||
		pips, err := az.ListPIP(&v1.Service{}, az.ResourceGroup)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err)
 | 
			
		||||
		assert.Empty(t, pips)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdatePIP(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	mockPIPClient := az.PublicIPAddressesClient.(*mockpublicipclient.MockInterface)
 | 
			
		||||
	mockPIPClient.EXPECT().CreateOrUpdate(gomock.Any(), az.ResourceGroup, "nic", gomock.Any()).Return(&retry.Error{HTTPStatusCode: http.StatusInternalServerError})
 | 
			
		||||
 | 
			
		||||
	err := az.CreateOrUpdatePIP(&v1.Service{}, az.ResourceGroup, network.PublicIPAddress{Name: pointer.String("nic")})
 | 
			
		||||
	assert.Equal(t, fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)), err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateInterface(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	mockInterfaceClient := az.InterfacesClient.(*mockinterfaceclient.MockInterface)
 | 
			
		||||
	mockInterfaceClient.EXPECT().CreateOrUpdate(gomock.Any(), az.ResourceGroup, "nic", gomock.Any()).Return(&retry.Error{HTTPStatusCode: http.StatusInternalServerError})
 | 
			
		||||
 | 
			
		||||
	err := az.CreateOrUpdateInterface(&v1.Service{}, network.Interface{Name: pointer.String("nic")})
 | 
			
		||||
	assert.Equal(t, fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)), err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeletePublicIP(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	mockPIPClient := az.PublicIPAddressesClient.(*mockpublicipclient.MockInterface)
 | 
			
		||||
	mockPIPClient.EXPECT().Delete(gomock.Any(), az.ResourceGroup, "pip").Return(&retry.Error{HTTPStatusCode: http.StatusInternalServerError})
 | 
			
		||||
 | 
			
		||||
	err := az.DeletePublicIP(&v1.Service{}, az.ResourceGroup, "pip")
 | 
			
		||||
	assert.Equal(t, fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)), err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteLB(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	mockLBClient := az.LoadBalancerClient.(*mockloadbalancerclient.MockInterface)
 | 
			
		||||
	mockLBClient.EXPECT().Delete(gomock.Any(), az.ResourceGroup, "lb").Return(&retry.Error{HTTPStatusCode: http.StatusInternalServerError})
 | 
			
		||||
 | 
			
		||||
	err := az.DeleteLB(&v1.Service{}, "lb")
 | 
			
		||||
	assert.Equal(t, fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)), err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestCreateOrUpdateRouteTable verifies CreateOrUpdateRouteTable error
// handling: both an etag mismatch (412) and an operation-canceled error are
// returned to the caller, and in both cases the route table entry is evicted
// from the rt cache.
func TestCreateOrUpdateRouteTable(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	tests := []struct {
		clientErr   *retry.Error
		expectedErr error
	}{
		// Etag precondition failure (412).
		{
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusPreconditionFailed},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 412, RawError: %w", error(nil)),
		},
		// Operation canceled and superseded by another operation.
		{
			clientErr:   &retry.Error{RawError: fmt.Errorf("canceledandsupersededduetoanotheroperation")},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("canceledandsupersededduetoanotheroperation")),
		},
	}

	for _, test := range tests {
		az := GetTestCloud(ctrl)
		az.rtCache.Set("rt", "test")

		mockRTClient := az.RouteTablesClient.(*mockroutetableclient.MockInterface)
		mockRTClient.EXPECT().CreateOrUpdate(gomock.Any(), az.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(test.clientErr)
		// Cache invalidation re-reads the route table once after the failure.
		mockRTClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "rt", gomock.Any()).Return(network.RouteTable{}, nil)

		err := az.CreateOrUpdateRouteTable(network.RouteTable{
			Name: pointer.String("rt"),
			Etag: pointer.String("etag"),
		})
		assert.Equal(t, test.expectedErr, err)

		// route table should be removed from cache if the etag is mismatch or the operation is canceled
		shouldBeEmpty, err := az.rtCache.Get("rt", cache.CacheReadTypeDefault)
		assert.NoError(t, err)
		assert.Empty(t, shouldBeEmpty)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestCreateOrUpdateRoute verifies CreateOrUpdateRoute error handling: etag
// mismatch (412) and operation-canceled errors are returned, a nil client
// error succeeds, and in every case the route table entry is evicted from
// the rt cache.
func TestCreateOrUpdateRoute(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	tests := []struct {
		clientErr   *retry.Error
		expectedErr error
	}{
		// Etag precondition failure (412).
		{
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusPreconditionFailed},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 412, RawError: %w", error(nil)),
		},
		// Operation canceled and superseded by another operation.
		{
			clientErr:   &retry.Error{RawError: fmt.Errorf("canceledandsupersededduetoanotheroperation")},
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("canceledandsupersededduetoanotheroperation")),
		},
		// Success path.
		{
			clientErr:   nil,
			expectedErr: nil,
		},
	}

	for _, test := range tests {
		az := GetTestCloud(ctrl)
		az.rtCache.Set("rt", "test")

		mockRTClient := az.RoutesClient.(*mockrouteclient.MockInterface)
		mockRTClient.EXPECT().CreateOrUpdate(gomock.Any(), az.ResourceGroup, "rt", gomock.Any(), gomock.Any(), gomock.Any()).Return(test.clientErr)

		// The rt cache refresh hits the route *tables* client, not the routes client.
		mockRTableClient := az.RouteTablesClient.(*mockroutetableclient.MockInterface)
		mockRTableClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "rt", gomock.Any()).Return(network.RouteTable{}, nil)

		err := az.CreateOrUpdateRoute(network.Route{
			Name: pointer.String("rt"),
			Etag: pointer.String("etag"),
		})
		assert.Equal(t, test.expectedErr, err)

		shouldBeEmpty, err := az.rtCache.Get("rt", cache.CacheReadTypeDefault)
		assert.NoError(t, err)
		assert.Empty(t, shouldBeEmpty)
	}
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteRouteWithName(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		clientErr   *retry.Error
 | 
			
		||||
		expectedErr error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusInternalServerError},
 | 
			
		||||
			expectedErr: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil)),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			clientErr:   nil,
 | 
			
		||||
			expectedErr: nil,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		az := GetTestCloud(ctrl)
 | 
			
		||||
 | 
			
		||||
		mockRTClient := az.RoutesClient.(*mockrouteclient.MockInterface)
 | 
			
		||||
		mockRTClient.EXPECT().Delete(gomock.Any(), az.ResourceGroup, "rt", "rt").Return(test.clientErr)
 | 
			
		||||
 | 
			
		||||
		err := az.DeleteRouteWithName("rt")
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestCreateOrUpdateVMSS verifies CreateOrUpdateVMSS: errors from the initial
// Get (500, 429, rate-limit) are returned unchanged, and a scale set whose
// provisioning state is "deallocating" produces no error (the expectedErr is
// nil for that case — presumably the update is skipped; confirm against the
// CreateOrUpdateVMSS implementation).
func TestCreateOrUpdateVMSS(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	tests := []struct {
		vmss        compute.VirtualMachineScaleSet
		clientErr   *retry.Error
		expectedErr *retry.Error
	}{
		// Server error on Get is returned as-is.
		{
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusInternalServerError},
			expectedErr: &retry.Error{HTTPStatusCode: http.StatusInternalServerError},
		},
		// Throttling (429) on Get is returned as-is.
		{
			clientErr:   &retry.Error{HTTPStatusCode: http.StatusTooManyRequests},
			expectedErr: &retry.Error{HTTPStatusCode: http.StatusTooManyRequests},
		},
		// Client-side rate limiting is returned as-is.
		{
			clientErr:   &retry.Error{RawError: fmt.Errorf("azure cloud provider rate limited(write) for operation CreateOrUpdate")},
			expectedErr: &retry.Error{RawError: fmt.Errorf("azure cloud provider rate limited(write) for operation CreateOrUpdate")},
		},
		// Deallocating scale set: no error expected.
		{
			vmss: compute.VirtualMachineScaleSet{
				VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{
					ProvisioningState: &virtualMachineScaleSetsDeallocating,
				},
			},
		},
	}

	for _, test := range tests {
		az := GetTestCloud(ctrl)

		mockVMSSClient := az.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
		mockVMSSClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, testVMSSName).Return(test.vmss, test.clientErr)

		err := az.CreateOrUpdateVMSS(az.ResourceGroup, testVMSSName, compute.VirtualMachineScaleSet{})
		assert.Equal(t, test.expectedErr, err)
	}
}
 | 
			
		||||
 | 
			
		||||
func TestRequestBackoff(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	az.CloudProviderBackoff = true
 | 
			
		||||
	az.ResourceRequestBackoff = wait.Backoff{Steps: 3}
 | 
			
		||||
 | 
			
		||||
	backoff := az.RequestBackoff()
 | 
			
		||||
	assert.Equal(t, wait.Backoff{Steps: 3}, backoff)
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
@@ -1,648 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"context"
 | 
			
		||||
	"encoding/binary"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/url"
 | 
			
		||||
	"regexp"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"sync/atomic"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
 | 
			
		||||
	azstorage "github.com/Azure/azure-sdk-for-go/storage"
 | 
			
		||||
	"github.com/rubiojr/go-vhd/vhd"
 | 
			
		||||
 | 
			
		||||
	kwait "k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	volerr "k8s.io/cloud-provider/volume/errors"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Attention: blob disk feature is deprecated
const (
	vhdContainerName         = "vhds" // blob container that holds VHD page blobs
	useHTTPSForBlobBasedDisk = true   // build blob disk URIs with the https scheme
	blobServiceName          = "blob" // service label used in blob endpoint hostnames
)
 | 
			
		||||
 | 
			
		||||
// storageAccountState is the controller's local bookkeeping record for one
// storage account used to host blob-based (VHD) disks.
type storageAccountState struct {
	// name is the storage account name.
	name                    string
	// saType is the account's SKU.
	saType                  storage.SkuName
	// key is the account access key.
	key                     string
	// diskCount tracks the number of disks in the account; int32 suggests
	// atomic updates — confirm against the code that mutates it.
	diskCount               int32
	// isValidating is presumably an atomic in-progress flag (0/1) —
	// TODO confirm with the validation code.
	isValidating            int32
	// defaultContainerCreated records whether the default vhd container exists.
	defaultContainerCreated bool
}
 | 
			
		||||
 | 
			
		||||
// BlobDiskController : blob disk controller struct
type BlobDiskController struct {
	// common carries the shared cloud/controller configuration (resource
	// group, storage endpoint suffix, storage clients).
	common   *controllerCommon
	// accounts caches per-storage-account state, keyed by account name;
	// populated lazily under accountsLock by initStorageAccounts.
	accounts map[string]*storageAccountState
}
 | 
			
		||||
 | 
			
		||||
var (
	// accountsLock serializes lazy initialization of
	// BlobDiskController.accounts (see initStorageAccounts).
	accountsLock = &sync.Mutex{}
)
 | 
			
		||||
 | 
			
		||||
func (c *BlobDiskController) initStorageAccounts() {
 | 
			
		||||
	accountsLock.Lock()
 | 
			
		||||
	defer accountsLock.Unlock()
 | 
			
		||||
 | 
			
		||||
	if c.accounts == nil {
 | 
			
		||||
		// get accounts
 | 
			
		||||
		accounts, err := c.getAllStorageAccounts()
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.Errorf("azureDisk - getAllStorageAccounts error: %v", err)
 | 
			
		||||
			c.accounts = make(map[string]*storageAccountState)
 | 
			
		||||
		}
 | 
			
		||||
		c.accounts = accounts
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
// Returns the disk name, the disk URI, and the provisioned size in GB.
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
	// Ensure a matching storage account exists (EnsureStorageAccount may
	// create one with the dedicated disk prefix) and obtain its access key.
	accountOptions := &AccountOptions{
		Name:                   accountName,
		Type:                   accountType,
		Kind:                   string(defaultStorageAccountKind),
		ResourceGroup:          c.common.resourceGroup,
		Location:               location,
		EnableHTTPSTrafficOnly: true,
	}
	account, key, err := c.common.cloud.EnsureStorageAccount(accountOptions, dedicatedDiskAccountNamePrefix)
	if err != nil {
		return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
	}

	// Build a blob-service client against the (possibly sovereign) cloud
	// environment configured for this cluster.
	client, err := azstorage.NewBasicClientOnSovereignCloud(account, key, c.common.cloud.Environment)
	if err != nil {
		return "", "", 0, err
	}
	blobClient := client.GetBlobService()

	// create a page blob in this account's vhd container
	diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account, blobName, vhdContainerName, int64(requestGB))
	if err != nil {
		return "", "", 0, err
	}

	klog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
	return diskName, diskURI, requestGB, err
}
 | 
			
		||||
 | 
			
		||||
// DeleteVolume deletes a VHD blob identified by its URI. If the blob still
// holds a lease (disk in use), a volerr.DeletedVolumeInUseError is returned
// so the caller can retry later.
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
	klog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
	// Split the URI into its storage account and blob name components.
	accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
	if err != nil {
		return fmt.Errorf("failed to parse vhd URI %v", err)
	}
	key, err := c.common.cloud.GetStorageAccesskey(accountName, c.common.resourceGroup)
	if err != nil {
		return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
	}
	err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
	if err != nil {
		klog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
		detail := err.Error()
		// Lease-id-missing means the blob is leased by a VM, i.e. the
		// disk is still attached; report "in use" rather than a hard failure.
		if strings.Contains(detail, errLeaseIDMissing) {
			// disk is still being used
			// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
			return volerr.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI))
		}
		return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
	}
	klog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
	return nil

}
 | 
			
		||||
 | 
			
		||||
// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) {
	scheme := "http"
	if useHTTPSForBlobBasedDisk {
		scheme = "https"
	}
	// NOTE(review): the dots in the host pattern and any metacharacters in
	// storageEndpointSuffix are not regex-escaped, so this matches more
	// loosely than the literal URL format — confirm whether QuoteMeta is
	// needed before changing.
	host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix)
	reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
	re := regexp.MustCompile(reStr)
	// Submatch 1 = storage account, submatch 2 = blob name.
	res := re.FindSubmatch([]byte(diskURI))
	if len(res) < 3 {
		return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI)
	}
	return string(res[1]), string(res[2]), nil
}
 | 
			
		||||
 | 
			
		||||
// createVHDBlobDisk creates a fixed-size page blob shaped as a VHD (data
// area plus trailing VHD header/footer) in the given container and returns
// the blob name (with ".vhd" appended) and its full URI. If the container
// does not exist yet it is created and the PUT is retried once.
func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) {
	container := blobClient.GetContainerReference(containerName)
	// Requested data size in bytes; the blob itself is this plus the VHD header.
	size := 1024 * 1024 * 1024 * sizeGB
	vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */
	// Blob name in URL must end with '.vhd' extension.
	vhdName = vhdName + ".vhd"

	tags := make(map[string]string)
	tags["createdby"] = "k8sAzureDataDisk"
	klog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)

	blob := container.GetBlobReference(vhdName)
	blob.Properties.ContentLength = vhdSize
	blob.Metadata = tags
	err := blob.PutPageBlob(nil)
	if err != nil {
		// if container doesn't exist, create one and retry PutPageBlob
		detail := err.Error()
		if strings.Contains(detail, errContainerNotFound) {
			err = container.Create(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
			if err == nil {
				err = blob.PutPageBlob(nil)
			}
		}
	}
	if err != nil {
		return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err)
	}

	// add VHD signature to the blob; the header lives in the last
	// VHD_HEADER_SIZE bytes, after the data area.
	h, err := createVHDHeader(uint64(size))
	if err != nil {
		// Best-effort cleanup of the half-created blob; the delete error is
		// intentionally ignored since the header failure is what we report.
		blob.DeleteIfExists(nil)
		return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
	}

	blobRange := azstorage.BlobRange{
		Start: uint64(size),
		End:   uint64(vhdSize - 1),
	}
	if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
		klog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
			vhdName, containerName, accountName, err.Error())
		return "", "", err
	}

	scheme := "http"
	if useHTTPSForBlobBasedDisk {
		scheme = "https"
	}

	// Assemble the blob's public URI from account, service and endpoint suffix.
	host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix)
	uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName)
	return vhdName, uri, nil
}
 | 
			
		||||
 | 
			
		||||
// delete a vhd blob
 | 
			
		||||
func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error {
 | 
			
		||||
	client, err := azstorage.NewBasicClientOnSovereignCloud(accountName, accountKey, c.common.cloud.Environment)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	blobSvc := client.GetBlobService()
 | 
			
		||||
 | 
			
		||||
	container := blobSvc.GetContainerReference(vhdContainerName)
 | 
			
		||||
	blob := container.GetBlobReference(blobName)
 | 
			
		||||
	return blob.Delete(nil)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateBlobDisk : create a blob disk in a node
 | 
			
		||||
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error) {
 | 
			
		||||
	klog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s", dataDiskName, storageAccountType)
 | 
			
		||||
 | 
			
		||||
	c.initStorageAccounts()
 | 
			
		||||
 | 
			
		||||
	storageAccountName, err := c.findSANameForDisk(storageAccountType)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	blobClient, err := c.getBlobSvcClient(storageAccountName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, vhdContainerName, int64(sizeGB))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1)
 | 
			
		||||
 | 
			
		||||
	return diskURI, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeleteBlobDisk : delete a blob disk from a node
 | 
			
		||||
func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
 | 
			
		||||
	storageAccountName, vhdName, err := diskNameAndSANameFromURI(diskURI)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, ok := c.accounts[storageAccountName]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		// the storage account is specified by user
 | 
			
		||||
		klog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
 | 
			
		||||
		return c.DeleteVolume(diskURI)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	blobSvc, err := c.getBlobSvcClient(storageAccountName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)
 | 
			
		||||
 | 
			
		||||
	container := blobSvc.GetContainerReference(vhdContainerName)
 | 
			
		||||
	blob := container.GetBlobReference(vhdName)
 | 
			
		||||
	_, err = blob.DeleteIfExists(nil)
 | 
			
		||||
 | 
			
		||||
	if c.accounts[storageAccountName].diskCount == -1 {
 | 
			
		||||
		if diskCount, err := c.getDiskCount(storageAccountName); err != nil {
 | 
			
		||||
			c.accounts[storageAccountName].diskCount = int32(diskCount)
 | 
			
		||||
		} else {
 | 
			
		||||
			klog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
 | 
			
		||||
			return nil // we have failed to acquire a new count. not an error condition
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
 | 
			
		||||
	return err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
 | 
			
		||||
	if account, exists := c.accounts[SAName]; exists && account.key != "" {
 | 
			
		||||
		return c.accounts[SAName].key, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	listKeysResult, rerr := c.common.cloud.StorageAccountClient.ListKeys(ctx, c.common.resourceGroup, SAName)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return "", rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	if listKeysResult.Keys == nil {
 | 
			
		||||
		return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName)
 | 
			
		||||
	}
 | 
			
		||||
	for _, v := range *listKeysResult.Keys {
 | 
			
		||||
		if v.Value != nil && *v.Value == "key1" {
 | 
			
		||||
			if _, ok := c.accounts[SAName]; !ok {
 | 
			
		||||
				klog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
 | 
			
		||||
				return *v.Value, nil
 | 
			
		||||
			}
 | 
			
		||||
			c.accounts[SAName].key = *v.Value
 | 
			
		||||
			return c.accounts[SAName].key, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) {
 | 
			
		||||
	key := ""
 | 
			
		||||
	var client azstorage.Client
 | 
			
		||||
	var blobSvc azstorage.BlobStorageClient
 | 
			
		||||
	var err error
 | 
			
		||||
	if key, err = c.getStorageAccountKey(SAName); err != nil {
 | 
			
		||||
		return blobSvc, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if client, err = azstorage.NewBasicClientOnSovereignCloud(SAName, key, c.common.cloud.Environment); err != nil {
 | 
			
		||||
		return blobSvc, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	blobSvc = client.GetBlobService()
 | 
			
		||||
	return blobSvc, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error {
 | 
			
		||||
	var err error
 | 
			
		||||
	var blobSvc azstorage.BlobStorageClient
 | 
			
		||||
 | 
			
		||||
	// short circuit the check via local cache
 | 
			
		||||
	// we are forgiving the fact that account may not be in cache yet
 | 
			
		||||
	if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// not cached, check existence and readiness
 | 
			
		||||
	bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)
 | 
			
		||||
 | 
			
		||||
	// account does not exist
 | 
			
		||||
	if !bExist {
 | 
			
		||||
		return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// account exists but not ready yet
 | 
			
		||||
	if provisionState != storage.Succeeded {
 | 
			
		||||
		// we don't want many attempts to validate the account readiness
 | 
			
		||||
		// here hence we are locking
 | 
			
		||||
		counter := 1
 | 
			
		||||
		for swapped := atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1); swapped != true; {
 | 
			
		||||
			time.Sleep(3 * time.Second)
 | 
			
		||||
			counter = counter + 1
 | 
			
		||||
			// check if we passed the max sleep
 | 
			
		||||
			if counter >= 20 {
 | 
			
		||||
				return fmt.Errorf("azureDisk - timeout waiting to acquire lock to validate account:%s readiness", storageAccountName)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// swapped
 | 
			
		||||
		defer func() {
 | 
			
		||||
			c.accounts[storageAccountName].isValidating = 0
 | 
			
		||||
		}()
 | 
			
		||||
 | 
			
		||||
		// short circuit the check again.
 | 
			
		||||
		if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
 | 
			
		||||
			return nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
 | 
			
		||||
			_, provisionState, err := c.getStorageAccountState(storageAccountName)
 | 
			
		||||
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				klog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
 | 
			
		||||
				return false, nil // error performing the query - retryable
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if provisionState == storage.Succeeded {
 | 
			
		||||
				return true, nil
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			klog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet (not flagged Succeeded by ARM)", storageAccountName)
 | 
			
		||||
			return false, nil // back off and see if the account becomes ready on next retry
 | 
			
		||||
		})
 | 
			
		||||
		// we have failed to ensure that account is ready for us to create
 | 
			
		||||
		// the default vhd container
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			if err == kwait.ErrWaitTimeout {
 | 
			
		||||
				return fmt.Errorf("azureDisk - timed out waiting for storage account %s to become ready", storageAccountName)
 | 
			
		||||
			}
 | 
			
		||||
			return err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	container := blobSvc.GetContainerReference(vhdContainerName)
 | 
			
		||||
	bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	if bCreated {
 | 
			
		||||
		klog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// flag so we no longer have to check on ARM
 | 
			
		||||
	c.accounts[storageAccountName].defaultContainerCreated = true
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Gets Disk counts per storage account
 | 
			
		||||
func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
 | 
			
		||||
	// if we have it in cache
 | 
			
		||||
	if c.accounts[SAName].diskCount != -1 {
 | 
			
		||||
		return int(c.accounts[SAName].diskCount), nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var err error
 | 
			
		||||
	var blobSvc azstorage.BlobStorageClient
 | 
			
		||||
 | 
			
		||||
	if err = c.ensureDefaultContainer(SAName); err != nil {
 | 
			
		||||
		return 0, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if blobSvc, err = c.getBlobSvcClient(SAName); err != nil {
 | 
			
		||||
		return 0, err
 | 
			
		||||
	}
 | 
			
		||||
	params := azstorage.ListBlobsParameters{}
 | 
			
		||||
 | 
			
		||||
	container := blobSvc.GetContainerReference(vhdContainerName)
 | 
			
		||||
	response, err := container.ListBlobs(params)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return 0, err
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(4).Infof("azure-Disk -  refreshed data count for account %s and found %v", SAName, len(response.Blobs))
 | 
			
		||||
	c.accounts[SAName].diskCount = int32(len(response.Blobs))
 | 
			
		||||
 | 
			
		||||
	return int(c.accounts[SAName].diskCount), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
 | 
			
		||||
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	accountList, rerr := c.common.cloud.StorageAccountClient.ListByResourceGroup(ctx, c.common.resourceGroup)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return nil, rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	accounts := make(map[string]*storageAccountState)
 | 
			
		||||
	for _, v := range accountList {
 | 
			
		||||
		if v.Name == nil || v.Sku == nil {
 | 
			
		||||
			klog.Info("azureDisk - accountListResult Name or Sku is nil")
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		klog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)
 | 
			
		||||
 | 
			
		||||
		saState := &storageAccountState{
 | 
			
		||||
			name:      *v.Name,
 | 
			
		||||
			saType:    (*v.Sku).Name,
 | 
			
		||||
			diskCount: -1,
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		accounts[*v.Name] = saState
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return accounts, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error {
 | 
			
		||||
	bExist, _, _ := c.getStorageAccountState(storageAccountName)
 | 
			
		||||
	if bExist {
 | 
			
		||||
		newAccountState := &storageAccountState{
 | 
			
		||||
			diskCount: -1,
 | 
			
		||||
			saType:    storageAccountType,
 | 
			
		||||
			name:      storageAccountName,
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		c.addAccountState(storageAccountName, newAccountState)
 | 
			
		||||
	}
 | 
			
		||||
	// Account Does not exist
 | 
			
		||||
	if !bExist {
 | 
			
		||||
		if len(c.accounts) == maxStorageAccounts && checkMaxAccounts {
 | 
			
		||||
			return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		klog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
 | 
			
		||||
 | 
			
		||||
		cp := storage.AccountCreateParameters{
 | 
			
		||||
			Sku: &storage.Sku{Name: storageAccountType},
 | 
			
		||||
			// switch to use StorageV2 as it's recommended according to https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
 | 
			
		||||
			Kind:     defaultStorageAccountKind,
 | 
			
		||||
			Tags:     map[string]*string{"created-by": pointer.String("azure-dd")},
 | 
			
		||||
			Location: &location}
 | 
			
		||||
		ctx, cancel := getContextWithCancel()
 | 
			
		||||
		defer cancel()
 | 
			
		||||
 | 
			
		||||
		err := c.common.cloud.StorageAccountClient.Create(ctx, c.common.resourceGroup, storageAccountName, cp)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return fmt.Errorf("Create Storage Account: %s, error: %v", storageAccountName, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		newAccountState := &storageAccountState{
 | 
			
		||||
			diskCount: -1,
 | 
			
		||||
			saType:    storageAccountType,
 | 
			
		||||
			name:      storageAccountName,
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		c.addAccountState(storageAccountName, newAccountState)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// finally, make sure that we default container is created
 | 
			
		||||
	// before handing it back over
 | 
			
		||||
	return c.ensureDefaultContainer(storageAccountName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// findSANameForDisk finds a suitable shared storage account for a new disk
// of the given SKU: it prefers an empty account, otherwise the least
// allocated one; it creates a brand-new account when none match or when the
// average utilization (per SKU type) would exceed the growth threshold.
func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) {
	maxDiskCount := maxDisksPerStorageAccounts
	SAName := ""
	totalDiskCounts := 0
	countAccounts := 0 // account of this type.
	for _, v := range c.accounts {
		// filter out any stand-alone disks/accounts
		if !strings.HasPrefix(v.name, sharedDiskAccountNamePrefix) {
			continue
		}

		// note: we compute avg stratified by type.
		// this is to enable user to grow per SA type to avoid low
		// avg utilization on one account type skewing all data.

		if v.saType == storageAccountType {
			// compute average
			dCount, err := c.getDiskCount(v.name)
			if err != nil {
				return "", err
			}
			totalDiskCounts = totalDiskCounts + dCount
			countAccounts = countAccounts + 1
			// empty account
			if dCount == 0 {
				klog.V(2).Infof("azureDisk - account %s identified for a new disk  is because it has 0 allocated disks", v.name)
				return v.name, nil // short circuit, avg is good and no need to adjust
			}
			// if this account is less allocated
			if dCount < maxDiskCount {
				maxDiskCount = dCount
				SAName = v.name
			}
		}
	}

	// if we failed to find storageaccount
	if SAName == "" {
		klog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
		SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
		if err != nil {
			return "", err
		}
		return SAName, nil
	}

	disksAfter := totalDiskCounts + 1 // with the new one!

	// Projected utilization across all accounts of this SKU type once the
	// new disk is placed.
	avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
	aboveAvg := avgUtilization > storageAccountUtilizationBeforeGrowing

	// avg are not create and we should create more accounts if we can
	if aboveAvg && countAccounts < maxStorageAccounts {
		klog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) >  grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
		SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
		if err != nil {
			return "", err
		}
		return SAName, nil
	}

	// averages are not ok and we are at capacity (max storage accounts allowed)
	if aboveAvg && countAccounts == maxStorageAccounts {
		klog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
			avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
	}

	// we found a  storage accounts && [ avg are ok || we reached max sa count ]
	return SAName, nil
}
 | 
			
		||||
 | 
			
		||||
// Gets storage account exist, provisionStatus, Error if any
 | 
			
		||||
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	account, rerr := c.common.cloud.StorageAccountClient.GetProperties(ctx, c.common.resourceGroup, storageAccountName)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return false, "", rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	return true, account.AccountProperties.ProvisioningState, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) {
 | 
			
		||||
	accountsLock.Lock()
 | 
			
		||||
	defer accountsLock.Unlock()
 | 
			
		||||
 | 
			
		||||
	if _, ok := c.accounts[key]; !ok {
 | 
			
		||||
		c.accounts[key] = state
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func createVHDHeader(size uint64) ([]byte, error) {
 | 
			
		||||
	h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
 | 
			
		||||
	b := new(bytes.Buffer)
 | 
			
		||||
	err := binary.Write(b, binary.BigEndian, h)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	return b.Bytes(), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// diskNameAndSANameFromURI extracts the storage account name (the first
// host label) and the VHD blob name (the last path segment) from a disk URI
// such as https://foo.blob.core.windows.net/vhds/bar.vhd.
func diskNameAndSANameFromURI(diskURI string) (string, string, error) {
	parsed, err := url.Parse(diskURI)
	if err != nil {
		return "", "", err
	}

	// Account name is the leading label of the host, e.g. "foo" in
	// foo.blob.core.windows.net.
	accountName := strings.SplitN(parsed.Host, ".", 2)[0]

	// Blob name is whatever follows the final "/" of the path (the whole
	// path when it contains no slash).
	path := parsed.Path
	vhdName := path[strings.LastIndex(path, "/")+1:]

	return accountName, vhdName, nil
}
 | 
			
		||||
@@ -1,361 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient/mockstorageaccountclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
 | 
			
		||||
	azstorage "github.com/Azure/azure-sdk-for-go/storage"
 | 
			
		||||
	autorestazure "github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var retryError500 = retry.Error{HTTPStatusCode: http.StatusInternalServerError}
 | 
			
		||||
 | 
			
		||||
// GetTestBlobDiskController builds a BlobDiskController backed by a fake
// cloud configured for the public Azure environment, resource group "rg",
// location "westus", and an empty account cache.
// NOTE(review): ctrl.Finish() is deferred inside this helper, so it fires
// when the helper returns rather than at the end of the calling test —
// confirm that is intended (callers create their own controller for mocks).
func GetTestBlobDiskController(t *testing.T) BlobDiskController {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	az := GetTestCloud(ctrl)
	az.Environment = autorestazure.PublicCloud
	common := &controllerCommon{cloud: az, resourceGroup: "rg", location: "westus"}

	return BlobDiskController{
		common:   common,
		accounts: make(map[string]*storageAccountState),
	}
}
 | 
			
		||||
 | 
			
		||||
// TestInitStorageAccounts verifies that initStorageAccounts leaves the
// account cache empty when listing storage accounts fails, and populates
// the cache when listing succeeds with a shared-prefix account.
func TestInitStorageAccounts(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	b := GetTestBlobDiskController(t)
	b.accounts = nil

	// First call: ListByResourceGroup returns an HTTP 500, so no accounts
	// should be cached.
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
	mockSAClient.EXPECT().ListByResourceGroup(gomock.Any(), b.common.resourceGroup).Return([]storage.Account{}, &retryError500)
	b.common.cloud.StorageAccountClient = mockSAClient

	b.initStorageAccounts()
	assert.Empty(t, b.accounts)

	// Second call: listing succeeds with one account whose name carries the
	// shared-disk prefix ("ds-"), which should land in the cache.
	mockSAClient.EXPECT().ListByResourceGroup(gomock.Any(), b.common.resourceGroup).Return([]storage.Account{
		{
			Name: pointer.String("ds-0"),
			Sku:  &storage.Sku{Name: "sku"},
		},
	}, nil)
	b.common.cloud.StorageAccountClient = mockSAClient

	b.initStorageAccounts()
	assert.Equal(t, 1, len(b.accounts))
}
 | 
			
		||||
 | 
			
		||||
// TestCreateVolume exercises CreateVolume against a mocked storage account
// client: first the key lookup fails (HTTP 500) and all outputs must be
// empty; then a valid key1 is returned and the flow proceeds to the page
// blob PUT, which is expected to fail with AccountIsDisabled.
// NOTE(review): the second case appears to depend on the real blob endpoint
// rejecting the fake credentials — confirm this test is hermetic enough.
func TestCreateVolume(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	b := GetTestBlobDiskController(t)

	// Case 1: ListKeys fails with a non-retriable 500.
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.AccountListKeysResult{}, &retryError500)
	b.common.cloud.StorageAccountClient = mockSAClient

	diskName, diskURI, requestGB, err := b.CreateVolume("testBlob", "testsa", "type", b.common.location, 10)
	expectedErr := fmt.Errorf("could not get storage key for storage account testsa: could not get storage key for "+
		"storage account testsa: Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil))
	assert.EqualError(t, err, expectedErr.Error())
	assert.Empty(t, diskName)
	assert.Empty(t, diskURI)
	assert.Zero(t, requestGB)

	// Case 2: ListKeys succeeds with key1, so creation reaches PutPageBlob.
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.AccountListKeysResult{
		Keys: &[]storage.AccountKey{
			{
				KeyName: pointer.String("key1"),
				Value:   pointer.String("dmFsdWUK"),
			},
		},
	}, nil)
	diskName, diskURI, requestGB, err = b.CreateVolume("testBlob", "testsa", "type", b.common.location, 10)
	expectedErrStr := "failed to put page blob testBlob.vhd in container vhds: storage: service returned error: StatusCode=403, ErrorCode=AccountIsDisabled, ErrorMessage=The specified account is disabled."
	assert.Error(t, err)
	assert.True(t, strings.Contains(err.Error(), expectedErrStr))
	assert.Empty(t, diskName)
	assert.Empty(t, diskURI)
	assert.Zero(t, requestGB)
}
 | 
			
		||||
 | 
			
		||||
// TestDeleteVolume exercises DeleteVolume: two calls where the key lookup
// fails (ListKeys 500, expected twice), one call with an unparseable disk
// URI, and one where the key succeeds but the blob delete is rejected with
// AccountIsDisabled.
func TestDeleteVolume(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	b := GetTestBlobDiskController(t)
	b.common.cloud.BlobDiskController = &b

	// The first two DeleteVolume calls each trigger one failing ListKeys.
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, "foo").Return(storage.AccountListKeysResult{}, &retryError500).Times(2)
	b.common.cloud.StorageAccountClient = mockSAClient

	fakeDiskURL := "fake"
	diskURL := "https://foo.blob./vhds/bar.vhd"
	err := b.DeleteVolume(diskURL)
	expectedErr := fmt.Errorf("no key for storage account foo, err Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", error(nil))
	assert.EqualError(t, err, expectedErr.Error())

	err = b.DeleteVolume(diskURL)
	assert.EqualError(t, err, expectedErr.Error())

	// Subsequent key lookups succeed with key1.
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, "foo").Return(storage.AccountListKeysResult{
		Keys: &[]storage.AccountKey{
			{
				KeyName: pointer.String("key1"),
				Value:   pointer.String("dmFsdWUK"),
			},
		},
	}, nil)

	// An unparseable URI fails before any key is used.
	err = b.DeleteVolume(fakeDiskURL)
	expectedErr = fmt.Errorf("failed to parse vhd URI invalid vhd URI for regex https://(.*).blob./vhds/(.*): fake")
	assert.Equal(t, expectedErr, err)

	// With a valid URI and key, the delete itself is rejected (403).
	err = b.DeleteVolume(diskURL)
	expectedErrStr := "failed to delete vhd https://foo.blob./vhds/bar.vhd, account foo, blob bar.vhd, err: storage: service returned error: " +
		"StatusCode=403, ErrorCode=AccountIsDisabled, ErrorMessage=The specified account is disabled."
	assert.Error(t, err)
	assert.True(t, strings.Contains(err.Error(), expectedErrStr))
}
 | 
			
		||||
 | 
			
		||||
func TestCreateVHDBlobDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	b := GetTestBlobDiskController(t)
 | 
			
		||||
 | 
			
		||||
	b.common.cloud.Environment = autorestazure.PublicCloud
 | 
			
		||||
	client, err := azstorage.NewBasicClientOnSovereignCloud("testsa", "a2V5Cg==", b.common.cloud.Environment)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	blobClient := client.GetBlobService()
 | 
			
		||||
 | 
			
		||||
	_, _, err = b.createVHDBlobDisk(blobClient, "testsa", "blob", vhdContainerName, int64(10))
 | 
			
		||||
	expectedErr := "failed to put page blob blob.vhd in container vhds: storage: service returned error: StatusCode=403, ErrorCode=AccountIsDisabled, ErrorMessage=The specified account is disabled."
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.True(t, strings.Contains(err.Error(), expectedErr))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetAllStorageAccounts(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	b := GetTestBlobDiskController(t)
 | 
			
		||||
 | 
			
		||||
	expectedStorageAccounts := []storage.Account{
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.String("this-should-be-skipped"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.String("this-should-be-skipped"),
 | 
			
		||||
			Sku:  &storage.Sku{Name: "sku"},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.String("ds-0"),
 | 
			
		||||
			Sku:  &storage.Sku{Name: "sku"},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
 | 
			
		||||
	mockSAClient.EXPECT().ListByResourceGroup(gomock.Any(), b.common.resourceGroup).Return(expectedStorageAccounts, nil)
 | 
			
		||||
	b.common.cloud.StorageAccountClient = mockSAClient
 | 
			
		||||
 | 
			
		||||
	accounts, err := b.getAllStorageAccounts()
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, 1, len(accounts))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestEnsureDefaultContainer(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	b := GetTestBlobDiskController(t)
 | 
			
		||||
 | 
			
		||||
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
 | 
			
		||||
	b.common.cloud.StorageAccountClient = mockSAClient
 | 
			
		||||
 | 
			
		||||
	mockSAClient.EXPECT().GetProperties(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.Account{}, &retryError500)
 | 
			
		||||
	err := b.ensureDefaultContainer("testsa")
 | 
			
		||||
	expectedErr := fmt.Errorf("azureDisk - account testsa does not exist while trying to create/ensure default container")
 | 
			
		||||
	assert.Equal(t, expectedErr, err)
 | 
			
		||||
 | 
			
		||||
	b.accounts["testsa"] = &storageAccountState{defaultContainerCreated: true}
 | 
			
		||||
	err = b.ensureDefaultContainer("testsa")
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	b.accounts["testsa"] = &storageAccountState{isValidating: 0}
 | 
			
		||||
	mockSAClient.EXPECT().GetProperties(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.Account{
 | 
			
		||||
		AccountProperties: &storage.AccountProperties{ProvisioningState: storage.Creating},
 | 
			
		||||
	}, nil)
 | 
			
		||||
	mockSAClient.EXPECT().GetProperties(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.Account{}, &retryError500)
 | 
			
		||||
	mockSAClient.EXPECT().GetProperties(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.Account{
 | 
			
		||||
		AccountProperties: &storage.AccountProperties{ProvisioningState: storage.Succeeded},
 | 
			
		||||
	}, nil)
 | 
			
		||||
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.AccountListKeysResult{
 | 
			
		||||
		Keys: &[]storage.AccountKey{
 | 
			
		||||
			{
 | 
			
		||||
				KeyName: pointer.String("key1"),
 | 
			
		||||
				Value:   pointer.String("key1"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}, nil)
 | 
			
		||||
	err = b.ensureDefaultContainer("testsa")
 | 
			
		||||
	expectedErrStr := "storage: service returned error: StatusCode=403, ErrorCode=AccountIsDisabled, ErrorMessage=The specified account is disabled."
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.True(t, strings.Contains(err.Error(), expectedErrStr))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetDiskCount(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	b := GetTestBlobDiskController(t)
 | 
			
		||||
 | 
			
		||||
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
 | 
			
		||||
	b.common.cloud.StorageAccountClient = mockSAClient
 | 
			
		||||
 | 
			
		||||
	b.accounts["testsa"] = &storageAccountState{diskCount: 1}
 | 
			
		||||
	count, err := b.getDiskCount("testsa")
 | 
			
		||||
	assert.Equal(t, 1, count)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	b.accounts["testsa"] = &storageAccountState{diskCount: -1}
 | 
			
		||||
	mockSAClient.EXPECT().GetProperties(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.Account{}, &retryError500)
 | 
			
		||||
	count, err = b.getDiskCount("testsa")
 | 
			
		||||
	assert.Zero(t, count)
 | 
			
		||||
	expectedErr := fmt.Errorf("azureDisk - account testsa does not exist while trying to create/ensure default container")
 | 
			
		||||
	assert.Equal(t, expectedErr, err)
 | 
			
		||||
 | 
			
		||||
	b.accounts["testsa"].defaultContainerCreated = true
 | 
			
		||||
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, "testsa").Return(storage.AccountListKeysResult{
 | 
			
		||||
		Keys: &[]storage.AccountKey{
 | 
			
		||||
			{
 | 
			
		||||
				KeyName: pointer.String("key1"),
 | 
			
		||||
				Value:   pointer.String("key1"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}, nil)
 | 
			
		||||
	count, err = b.getDiskCount("testsa")
 | 
			
		||||
	expectedErrStr := "storage: service returned error: StatusCode=403, ErrorCode=AccountIsDisabled, ErrorMessage=The specified account is disabled."
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.True(t, strings.Contains(err.Error(), expectedErrStr))
 | 
			
		||||
	assert.Zero(t, count)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestFindSANameForDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	b := GetTestBlobDiskController(t)
 | 
			
		||||
 | 
			
		||||
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
 | 
			
		||||
	b.common.cloud.StorageAccountClient = mockSAClient
 | 
			
		||||
 | 
			
		||||
	b.accounts = map[string]*storageAccountState{
 | 
			
		||||
		"this-shall-be-skipped": {name: "fake"},
 | 
			
		||||
		"ds0": {
 | 
			
		||||
			name:      "ds0",
 | 
			
		||||
			saType:    storage.StandardGRS,
 | 
			
		||||
			diskCount: 50,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	mockSAClient.EXPECT().GetProperties(gomock.Any(), b.common.resourceGroup, gomock.Any()).Return(storage.Account{}, &retryError500).Times(2)
 | 
			
		||||
	mockSAClient.EXPECT().GetProperties(gomock.Any(), b.common.resourceGroup, gomock.Any()).Return(storage.Account{
 | 
			
		||||
		AccountProperties: &storage.AccountProperties{ProvisioningState: storage.Succeeded},
 | 
			
		||||
	}, nil).Times(2)
 | 
			
		||||
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, gomock.Any()).Return(storage.AccountListKeysResult{
 | 
			
		||||
		Keys: &[]storage.AccountKey{
 | 
			
		||||
			{
 | 
			
		||||
				KeyName: pointer.String("key1"),
 | 
			
		||||
				Value:   pointer.String("key1"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}, nil)
 | 
			
		||||
	mockSAClient.EXPECT().Create(gomock.Any(), b.common.resourceGroup, gomock.Any(), gomock.Any()).Return(nil)
 | 
			
		||||
	name, err := b.findSANameForDisk(storage.StandardGRS)
 | 
			
		||||
	expectedErr := "does not exist while trying to create/ensure default container"
 | 
			
		||||
	assert.True(t, strings.Contains(err.Error(), expectedErr))
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.Empty(t, name)
 | 
			
		||||
 | 
			
		||||
	b.accounts = make(map[string]*storageAccountState)
 | 
			
		||||
	name, err = b.findSANameForDisk(storage.StandardGRS)
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.Empty(t, name)
 | 
			
		||||
 | 
			
		||||
	b.accounts = map[string]*storageAccountState{
 | 
			
		||||
		"ds0": {
 | 
			
		||||
			name:      "ds0",
 | 
			
		||||
			saType:    storage.StandardGRS,
 | 
			
		||||
			diskCount: 0,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	name, err = b.findSANameForDisk(storage.StandardGRS)
 | 
			
		||||
	assert.Equal(t, "ds0", name)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	for i := 0; i < maxStorageAccounts; i++ {
 | 
			
		||||
		b.accounts[fmt.Sprintf("ds%d", i)] = &storageAccountState{
 | 
			
		||||
			name:      fmt.Sprintf("ds%d", i),
 | 
			
		||||
			saType:    storage.StandardGRS,
 | 
			
		||||
			diskCount: 59,
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	name, err = b.findSANameForDisk(storage.StandardGRS)
 | 
			
		||||
	assert.NotEmpty(t, name)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateBlobDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	b := GetTestBlobDiskController(t)
 | 
			
		||||
	b.accounts = map[string]*storageAccountState{
 | 
			
		||||
		"ds0": {
 | 
			
		||||
			name:      "ds0",
 | 
			
		||||
			saType:    storage.StandardGRS,
 | 
			
		||||
			diskCount: 0,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	mockSAClient := mockstorageaccountclient.NewMockInterface(ctrl)
 | 
			
		||||
	b.common.cloud.StorageAccountClient = mockSAClient
 | 
			
		||||
	mockSAClient.EXPECT().ListKeys(gomock.Any(), b.common.resourceGroup, gomock.Any()).Return(storage.AccountListKeysResult{
 | 
			
		||||
		Keys: &[]storage.AccountKey{
 | 
			
		||||
			{
 | 
			
		||||
				KeyName: pointer.String("key1"),
 | 
			
		||||
				Value:   pointer.String("key1"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}, nil)
 | 
			
		||||
	diskURI, err := b.CreateBlobDisk("datadisk", storage.StandardGRS, 10)
 | 
			
		||||
	expectedErr := "failed to put page blob datadisk.vhd in container vhds: storage: service returned error: StatusCode=403"
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.True(t, strings.Contains(err.Error(), expectedErr))
 | 
			
		||||
	assert.Empty(t, diskURI)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,95 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"sigs.k8s.io/yaml"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	cloudConfigNamespace  = "kube-system"
 | 
			
		||||
	cloudConfigKey        = "cloud-config"
 | 
			
		||||
	cloudConfigSecretName = "azure-cloud-provider"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// The config type for Azure cloud provider secret. Supported values are:
 | 
			
		||||
// * file   : The values are read from local cloud-config file.
 | 
			
		||||
// * secret : The values from secret would override all configures from local cloud-config file.
 | 
			
		||||
// * merge  : The values from secret would override only configurations that are explicitly set in the secret. This is the default value.
 | 
			
		||||
type cloudConfigType string
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	cloudConfigTypeFile   cloudConfigType = "file"
 | 
			
		||||
	cloudConfigTypeSecret cloudConfigType = "secret"
 | 
			
		||||
	cloudConfigTypeMerge  cloudConfigType = "merge"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// InitializeCloudFromSecret initializes Azure cloud provider from Kubernetes secret.
 | 
			
		||||
func (az *Cloud) InitializeCloudFromSecret() {
 | 
			
		||||
	config, err := az.getConfigFromSecret()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Warningf("Failed to get cloud-config from secret: %v, skip initializing from secret", err)
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if config == nil {
 | 
			
		||||
		// Skip re-initialization if the config is not override.
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if err := az.InitializeCloudFromConfig(config, true); err != nil {
 | 
			
		||||
		klog.Errorf("Failed to initialize Azure cloud provider: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getConfigFromSecret() (*Config, error) {
 | 
			
		||||
	// Read config from file and no override, return nil.
 | 
			
		||||
	if az.Config.CloudConfigType == cloudConfigTypeFile {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	secret, err := az.KubeClient.CoreV1().Secrets(cloudConfigNamespace).Get(context.TODO(), cloudConfigSecretName, metav1.GetOptions{})
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("failed to get secret %s: %v", cloudConfigSecretName, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cloudConfigData, ok := secret.Data[cloudConfigKey]
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return nil, fmt.Errorf("cloud-config is not set in the secret (%s)", cloudConfigSecretName)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	config := Config{}
 | 
			
		||||
	if az.Config.CloudConfigType == "" || az.Config.CloudConfigType == cloudConfigTypeMerge {
 | 
			
		||||
		// Merge cloud config, set default value to existing config.
 | 
			
		||||
		config = az.Config
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	err = yaml.Unmarshal(cloudConfigData, &config)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, fmt.Errorf("failed to parse Azure cloud-config: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &config, nil
 | 
			
		||||
}
 | 
			
		||||
@@ -1,268 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 | 
			
		||||
	fakeclient "k8s.io/client-go/kubernetes/fake"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/auth"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
	"sigs.k8s.io/yaml"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func getTestConfig() *Config {
 | 
			
		||||
	return &Config{
 | 
			
		||||
		AzureAuthConfig: auth.AzureAuthConfig{
 | 
			
		||||
			TenantID:        "TenantID",
 | 
			
		||||
			SubscriptionID:  "SubscriptionID",
 | 
			
		||||
			AADClientID:     "AADClientID",
 | 
			
		||||
			AADClientSecret: "AADClientSecret",
 | 
			
		||||
		},
 | 
			
		||||
		ResourceGroup:               "ResourceGroup",
 | 
			
		||||
		RouteTableName:              "RouteTableName",
 | 
			
		||||
		RouteTableResourceGroup:     "RouteTableResourceGroup",
 | 
			
		||||
		Location:                    "Location",
 | 
			
		||||
		SubnetName:                  "SubnetName",
 | 
			
		||||
		VnetName:                    "VnetName",
 | 
			
		||||
		PrimaryAvailabilitySetName:  "PrimaryAvailabilitySetName",
 | 
			
		||||
		PrimaryScaleSetName:         "PrimaryScaleSetName",
 | 
			
		||||
		LoadBalancerSku:             "LoadBalancerSku",
 | 
			
		||||
		ExcludeMasterFromStandardLB: pointer.Bool(true),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestCloudConfigTypeSecretConfig() *Config {
 | 
			
		||||
	return &Config{
 | 
			
		||||
		AzureAuthConfig: auth.AzureAuthConfig{
 | 
			
		||||
			TenantID:       "TenantID",
 | 
			
		||||
			SubscriptionID: "SubscriptionID",
 | 
			
		||||
		},
 | 
			
		||||
		ResourceGroup:           "ResourceGroup",
 | 
			
		||||
		RouteTableName:          "RouteTableName",
 | 
			
		||||
		RouteTableResourceGroup: "RouteTableResourceGroup",
 | 
			
		||||
		SecurityGroupName:       "SecurityGroupName",
 | 
			
		||||
		CloudConfigType:         cloudConfigTypeSecret,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestCloudConfigTypeMergeConfig() *Config {
 | 
			
		||||
	return &Config{
 | 
			
		||||
		AzureAuthConfig: auth.AzureAuthConfig{
 | 
			
		||||
			TenantID:       "TenantID",
 | 
			
		||||
			SubscriptionID: "SubscriptionID",
 | 
			
		||||
		},
 | 
			
		||||
		ResourceGroup:           "ResourceGroup",
 | 
			
		||||
		RouteTableName:          "RouteTableName",
 | 
			
		||||
		RouteTableResourceGroup: "RouteTableResourceGroup",
 | 
			
		||||
		SecurityGroupName:       "SecurityGroupName",
 | 
			
		||||
		CloudConfigType:         cloudConfigTypeMerge,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestCloudConfigTypeMergeConfigExpected() *Config {
 | 
			
		||||
	config := getTestConfig()
 | 
			
		||||
	config.SecurityGroupName = "SecurityGroupName"
 | 
			
		||||
	config.CloudConfigType = cloudConfigTypeMerge
 | 
			
		||||
	return config
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetConfigFromSecret(t *testing.T) {
 | 
			
		||||
	emptyConfig := &Config{}
 | 
			
		||||
	badConfig := &Config{ResourceGroup: "DuplicateColumnsIncloud-config"}
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		name           string
 | 
			
		||||
		existingConfig *Config
 | 
			
		||||
		secretConfig   *Config
 | 
			
		||||
		expected       *Config
 | 
			
		||||
		expectErr      bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name: "Azure config shouldn't be override when cloud config type is file",
 | 
			
		||||
			existingConfig: &Config{
 | 
			
		||||
				ResourceGroup:   "ResourceGroup1",
 | 
			
		||||
				CloudConfigType: cloudConfigTypeFile,
 | 
			
		||||
			},
 | 
			
		||||
			secretConfig: getTestConfig(),
 | 
			
		||||
			expected:     nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Azure config should be override when cloud config type is secret",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeSecretConfig(),
 | 
			
		||||
			secretConfig:   getTestConfig(),
 | 
			
		||||
			expected:       getTestConfig(),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Azure config should be override when cloud config type is merge",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeMergeConfig(),
 | 
			
		||||
			secretConfig:   getTestConfig(),
 | 
			
		||||
			expected:       getTestCloudConfigTypeMergeConfigExpected(),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Error should be reported when secret doesn't exists",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeMergeConfig(),
 | 
			
		||||
			expectErr:      true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Error should be reported when secret exists but cloud-config data is not provided",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeMergeConfig(),
 | 
			
		||||
			secretConfig:   emptyConfig,
 | 
			
		||||
			expectErr:      true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Error should be reported when it failed to parse Azure cloud-config",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeMergeConfig(),
 | 
			
		||||
			secretConfig:   badConfig,
 | 
			
		||||
			expectErr:      true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		t.Run(test.name, func(t *testing.T) {
 | 
			
		||||
			az := &Cloud{
 | 
			
		||||
				KubeClient: fakeclient.NewSimpleClientset(),
 | 
			
		||||
			}
 | 
			
		||||
			if test.existingConfig != nil {
 | 
			
		||||
				az.Config = *test.existingConfig
 | 
			
		||||
			}
 | 
			
		||||
			if test.secretConfig != nil {
 | 
			
		||||
				secret := &v1.Secret{
 | 
			
		||||
					Type: v1.SecretTypeOpaque,
 | 
			
		||||
					ObjectMeta: metav1.ObjectMeta{
 | 
			
		||||
						Name:      "azure-cloud-provider",
 | 
			
		||||
						Namespace: "kube-system",
 | 
			
		||||
					},
 | 
			
		||||
				}
 | 
			
		||||
				if test.secretConfig != emptyConfig && test.secretConfig != badConfig {
 | 
			
		||||
					secretData, err := yaml.Marshal(test.secretConfig)
 | 
			
		||||
					assert.NoError(t, err, test.name)
 | 
			
		||||
					secret.Data = map[string][]byte{
 | 
			
		||||
						"cloud-config": secretData,
 | 
			
		||||
					}
 | 
			
		||||
				}
 | 
			
		||||
				if test.secretConfig == badConfig {
 | 
			
		||||
					secret.Data = map[string][]byte{"cloud-config": []byte(`unknown: "hello",unknown: "hello"`)}
 | 
			
		||||
				}
 | 
			
		||||
				_, err := az.KubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(context.TODO(), secret, metav1.CreateOptions{})
 | 
			
		||||
				assert.NoError(t, err, test.name)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			real, err := az.getConfigFromSecret()
 | 
			
		||||
			if test.expectErr {
 | 
			
		||||
				assert.Error(t, err, test.name)
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			assert.NoError(t, err, test.name)
 | 
			
		||||
			assert.Equal(t, test.expected, real, test.name)
 | 
			
		||||
		})
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestInitializeCloudFromSecret(t *testing.T) {
 | 
			
		||||
	emptyConfig := &Config{}
 | 
			
		||||
	unknownConfigTypeConfig := getTestConfig()
 | 
			
		||||
	unknownConfigTypeConfig.CloudConfigType = "UnknownConfigType"
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		name           string
 | 
			
		||||
		existingConfig *Config
 | 
			
		||||
		secretConfig   *Config
 | 
			
		||||
		expected       *Config
 | 
			
		||||
		expectErr      bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name: "Azure config shouldn't be override when cloud config type is file",
 | 
			
		||||
			existingConfig: &Config{
 | 
			
		||||
				ResourceGroup:   "ResourceGroup1",
 | 
			
		||||
				CloudConfigType: cloudConfigTypeFile,
 | 
			
		||||
			},
 | 
			
		||||
			secretConfig: getTestConfig(),
 | 
			
		||||
			expected:     nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name: "Azure config shouldn't be override when cloud config type is unknown",
 | 
			
		||||
			existingConfig: &Config{
 | 
			
		||||
				ResourceGroup:   "ResourceGroup1",
 | 
			
		||||
				CloudConfigType: "UnknownConfigType",
 | 
			
		||||
			},
 | 
			
		||||
			secretConfig: unknownConfigTypeConfig,
 | 
			
		||||
			expected:     nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Azure config should be override when cloud config type is secret",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeSecretConfig(),
 | 
			
		||||
			secretConfig:   getTestConfig(),
 | 
			
		||||
			expected:       getTestConfig(),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Azure config should be override when cloud config type is merge",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeMergeConfig(),
 | 
			
		||||
			secretConfig:   getTestConfig(),
 | 
			
		||||
			expected:       getTestCloudConfigTypeMergeConfigExpected(),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Error should be reported when secret doesn't exists",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeMergeConfig(),
 | 
			
		||||
			expectErr:      true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "Error should be reported when secret exists but cloud-config data is not provided",
 | 
			
		||||
			existingConfig: getTestCloudConfigTypeMergeConfig(),
 | 
			
		||||
			secretConfig:   emptyConfig,
 | 
			
		||||
			expectErr:      true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		t.Run(test.name, func(t *testing.T) {
 | 
			
		||||
			az := &Cloud{
 | 
			
		||||
				KubeClient: fakeclient.NewSimpleClientset(),
 | 
			
		||||
			}
 | 
			
		||||
			if test.existingConfig != nil {
 | 
			
		||||
				az.Config = *test.existingConfig
 | 
			
		||||
			}
 | 
			
		||||
			if test.secretConfig != nil {
 | 
			
		||||
				secret := &v1.Secret{
 | 
			
		||||
					Type: v1.SecretTypeOpaque,
 | 
			
		||||
					ObjectMeta: metav1.ObjectMeta{
 | 
			
		||||
						Name:      "azure-cloud-provider",
 | 
			
		||||
						Namespace: "kube-system",
 | 
			
		||||
					},
 | 
			
		||||
				}
 | 
			
		||||
				if test.secretConfig != emptyConfig {
 | 
			
		||||
					secretData, err := yaml.Marshal(test.secretConfig)
 | 
			
		||||
					assert.NoError(t, err, test.name)
 | 
			
		||||
					secret.Data = map[string][]byte{
 | 
			
		||||
						"cloud-config": secretData,
 | 
			
		||||
					}
 | 
			
		||||
				}
 | 
			
		||||
				_, err := az.KubeClient.CoreV1().Secrets(cloudConfigNamespace).Create(context.TODO(), secret, metav1.CreateOptions{})
 | 
			
		||||
				assert.NoError(t, err, test.name)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			az.InitializeCloudFromSecret()
 | 
			
		||||
		})
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -1,460 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"path"
 | 
			
		||||
	"regexp"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	kwait "k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	volerr "k8s.io/cloud-provider/volume/errors"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
 | 
			
		||||
	maxStorageAccounts                     = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand alone disks
 | 
			
		||||
	maxDisksPerStorageAccounts             = 60
 | 
			
		||||
	storageAccountUtilizationBeforeGrowing = 0.5
 | 
			
		||||
	// Disk Caching is not supported for disks 4 TiB and larger
 | 
			
		||||
	// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
 | 
			
		||||
	diskCachingLimit = 4096 // GiB
 | 
			
		||||
 | 
			
		||||
	maxLUN               = 64 // max number of LUNs per VM
 | 
			
		||||
	errLeaseIDMissing    = "LeaseIdMissing"
 | 
			
		||||
	errContainerNotFound = "ContainerNotFound"
 | 
			
		||||
	errStatusCode400     = "statuscode=400"
 | 
			
		||||
	errInvalidParameter  = `code="invalidparameter"`
 | 
			
		||||
	errTargetInstanceIds = `target="instanceids"`
 | 
			
		||||
	sourceSnapshot       = "snapshot"
 | 
			
		||||
	sourceVolume         = "volume"
 | 
			
		||||
 | 
			
		||||
	// WriteAcceleratorEnabled support for Azure Write Accelerator on Azure Disks
 | 
			
		||||
	// https://docs.microsoft.com/azure/virtual-machines/windows/how-to-enable-write-accelerator
 | 
			
		||||
	WriteAcceleratorEnabled = "writeacceleratorenabled"
 | 
			
		||||
 | 
			
		||||
	// see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-by-copying-a-snapshot.
 | 
			
		||||
	diskSnapshotPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/snapshots/%s"
 | 
			
		||||
 | 
			
		||||
	// see https://docs.microsoft.com/en-us/rest/api/compute/disks/createorupdate#create-a-managed-disk-from-an-existing-managed-disk-in-the-same-or-different-subscription.
 | 
			
		||||
	managedDiskPath = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var defaultBackOff = kwait.Backoff{
 | 
			
		||||
	Steps:    20,
 | 
			
		||||
	Duration: 2 * time.Second,
 | 
			
		||||
	Factor:   1.5,
 | 
			
		||||
	Jitter:   0.0,
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	managedDiskPathRE  = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/disks/(.+)`)
 | 
			
		||||
	diskSnapshotPathRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/snapshots/(.+)`)
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// controllerCommon holds the state shared by the disk attach/detach logic
// across the different Azure VM set implementations (standard VMs, VMSS).
type controllerCommon struct {
	subscriptionID        string // Azure subscription the disks live in
	location              string // Azure region of the cluster
	storageEndpointSuffix string // storage endpoint suffix (varies per Azure cloud)
	resourceGroup         string // default resource group of the cluster
	// store disk URI when disk is in attaching or detaching process
	diskAttachDetachMap sync.Map
	// vm disk map used to lock per vm update calls
	vmLockMap *lockMap
	cloud     *Cloud
}
 | 
			
		||||
 | 
			
		||||
// getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type.
 | 
			
		||||
func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) {
 | 
			
		||||
	// 1. vmType is standard, return cloud.VMSet directly.
 | 
			
		||||
	if c.cloud.VMType == vmTypeStandard {
 | 
			
		||||
		return c.cloud.VMSet, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet.
 | 
			
		||||
	ss, ok := c.cloud.VMSet.(*scaleSet)
 | 
			
		||||
	if !ok {
 | 
			
		||||
		return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 3. If the node is managed by availability set, then return ss.availabilitySet.
 | 
			
		||||
	managedByAS, err := ss.isNodeManagedByAvailabilitySet(mapNodeNameToVMName(nodeName), crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	if managedByAS {
 | 
			
		||||
		// vm is managed by availability set.
 | 
			
		||||
		return ss.availabilitySet, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// 4. Node is managed by vmss
 | 
			
		||||
	return ss, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI.
// return (lun, error)
//
// For managed disks it first inspects the disk resource to detect dangling
// attachments, oversized disks (caching unsupported >= diskCachingLimit GiB),
// disk encryption set and write accelerator settings, then picks a free LUN
// under the per-node lock and delegates the actual attach to the VMSet.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error) {
	diskEncryptionSetID := ""
	writeAcceleratorEnabled := false

	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
	if err != nil {
		return -1, err
	}

	if isManagedDisk {
		// NOTE: deliberately shadows the diskName parameter inside this
		// branch — for managed disks the name is derived from the URI.
		diskName := path.Base(diskURI)
		resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
		if err != nil {
			return -1, err
		}

		ctx, cancel := getContextWithCancel()
		defer cancel()

		disk, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
		if rerr != nil {
			return -1, rerr.Error()
		}

		// A non-shared disk already attached elsewhere is reported as a
		// dangling error so the attach/detach controller can recover it.
		if disk.ManagedBy != nil && (disk.MaxShares == nil || *disk.MaxShares <= 1) {
			attachErr := fmt.Sprintf(
				"disk(%s) already attached to node(%s), could not be attached to node(%s)",
				diskURI, *disk.ManagedBy, nodeName)
			attachedNode, err := vmset.GetNodeNameByProviderID(*disk.ManagedBy)
			if err != nil {
				return -1, err
			}
			klog.V(2).Infof("found dangling volume %s attached to node %s", diskURI, attachedNode)
			danglingErr := volerr.NewDanglingError(attachErr, attachedNode, "")
			return -1, danglingErr
		}

		if disk.DiskProperties != nil {
			if disk.DiskProperties.DiskSizeGB != nil && *disk.DiskProperties.DiskSizeGB >= diskCachingLimit && cachingMode != compute.CachingTypesNone {
				// Disk Caching is not supported for disks 4 TiB and larger
				// https://docs.microsoft.com/en-us/azure/virtual-machines/premium-storage-performance#disk-caching
				cachingMode = compute.CachingTypesNone
				klog.Warningf("size of disk(%s) is %dGB which is bigger than limit(%dGB), set cacheMode as None",
					diskURI, *disk.DiskProperties.DiskSizeGB, diskCachingLimit)
			}

			if disk.DiskProperties.Encryption != nil &&
				disk.DiskProperties.Encryption.DiskEncryptionSetID != nil {
				diskEncryptionSetID = *disk.DiskProperties.Encryption.DiskEncryptionSetID
			}
		}

		// Honor the write accelerator tag on the disk resource, if present.
		if v, ok := disk.Tags[WriteAcceleratorEnabled]; ok {
			if v != nil && strings.EqualFold(*v, "true") {
				writeAcceleratorEnabled = true
			}
		}
	}

	instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName)
	if err != nil {
		klog.Warningf("failed to get azure instance id (%v) for node %s", err, nodeName)
		return -1, fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
	}

	// Serialize all attach/detach VM updates for this node; LUN selection and
	// the attach call must happen under the same lock.
	c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
	defer c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))

	lun, err := c.GetNextDiskLun(nodeName)
	if err != nil {
		klog.Warningf("no LUN available for instance %q (%v)", nodeName, err)
		return -1, fmt.Errorf("all LUNs are used, cannot attach volume (%s, %s) to instance %q (%v)", diskName, diskURI, instanceid, err)
	}

	klog.V(2).Infof("Trying to attach volume %q lun %d to node %q.", diskURI, lun, nodeName)
	// Mark the disk as in-flight so other code paths can see it is busy.
	c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "attaching")
	defer c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
	return lun, vmset.AttachDisk(isManagedDisk, diskName, diskURI, nodeName, lun, cachingMode, diskEncryptionSetID, writeAcceleratorEnabled)
}
 | 
			
		||||
 | 
			
		||||
// DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI.
//
// A missing node (InstanceNotFound) is treated as "already detached". When
// the first detach fails with a retriable error and cloud provider backoff is
// enabled, the detach is retried with exponential backoff, re-acquiring the
// per-node lock around each individual attempt.
func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
	_, err := c.cloud.InstanceID(context.TODO(), nodeName)
	if err != nil {
		if err == cloudprovider.InstanceNotFound {
			// if host doesn't exist, no need to detach
			klog.Warningf("azureDisk - failed to get azure instance id(%q), DetachDisk(%s) will assume disk is already detached",
				nodeName, diskURI)
			return nil
		}
		klog.Warningf("failed to get azure instance id (%v)", err)
		return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err)
	}

	vmset, err := c.getNodeVMSet(nodeName, azcache.CacheReadTypeUnsafe)
	if err != nil {
		return err
	}

	klog.V(2).Infof("detach %v from node %q", diskURI, nodeName)

	// make the lock here as small as possible
	c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
	c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
	err = vmset.DetachDisk(diskName, diskURI, nodeName)
	c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
	c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))

	if err != nil {
		if isInstanceNotFoundError(err) {
			// if host doesn't exist, no need to detach
			klog.Warningf("azureDisk - got InstanceNotFoundError(%v), DetachDisk(%s) will assume disk is already detached",
				err, diskURI)
			return nil
		}
		if retry.IsErrorRetriable(err) && c.cloud.CloudProviderBackoff {
			klog.Warningf("azureDisk - update backing off: detach disk(%s, %s), err: %v", diskName, diskURI, err)
			retryErr := kwait.ExponentialBackoff(c.cloud.RequestBackoff(), func() (bool, error) {
				// Each attempt takes the lock and marks the disk busy just as
				// the initial attempt did.
				c.vmLockMap.LockEntry(strings.ToLower(string(nodeName)))
				c.diskAttachDetachMap.Store(strings.ToLower(diskURI), "detaching")
				err := vmset.DetachDisk(diskName, diskURI, nodeName)
				c.diskAttachDetachMap.Delete(strings.ToLower(diskURI))
				c.vmLockMap.UnlockEntry(strings.ToLower(string(nodeName)))

				// Stop retrying as soon as the error is nil or non-retriable.
				retriable := false
				if err != nil && retry.IsErrorRetriable(err) {
					retriable = true
				}
				return !retriable, err
			})
			if retryErr != nil {
				err = retryErr
				klog.V(2).Infof("azureDisk - update abort backoff: detach disk(%s, %s), err: %v", diskName, diskURI, err)
			}
		}
	}
	if err != nil {
		klog.Errorf("azureDisk - detach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
		return err
	}

	klog.V(2).Infof("azureDisk - detach disk(%s, %s) succeeded", diskName, diskURI)
	return nil
}
 | 
			
		||||
 | 
			
		||||
// getNodeDataDisks invokes vmSet interfaces to get data disks for the node.
 | 
			
		||||
func (c *controllerCommon) getNodeDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, error) {
 | 
			
		||||
	vmset, err := c.getNodeVMSet(nodeName, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return vmset.GetDataDisks(nodeName, crt)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI.
 | 
			
		||||
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
 | 
			
		||||
	// getNodeDataDisks need to fetch the cached data/fresh data if cache expired here
 | 
			
		||||
	// to ensure we get LUN based on latest entry.
 | 
			
		||||
	disks, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeDefault)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
 | 
			
		||||
		return -1, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, disk := range disks {
 | 
			
		||||
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
 | 
			
		||||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
 | 
			
		||||
			(disk.ManagedDisk != nil && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
 | 
			
		||||
			if disk.ToBeDetached != nil && *disk.ToBeDetached {
 | 
			
		||||
				klog.Warningf("azureDisk - find disk(ToBeDetached): lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
 | 
			
		||||
			} else {
 | 
			
		||||
				// found the disk
 | 
			
		||||
				klog.V(2).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
 | 
			
		||||
				return *disk.Lun, nil
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return -1, fmt.Errorf("cannot find Lun for disk %s", diskName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetNextDiskLun searches all vhd attachment on the host and find unused lun. Return -1 if all luns are used.
 | 
			
		||||
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
 | 
			
		||||
	disks, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeDefault)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("error of getting data disks for node %q: %v", nodeName, err)
 | 
			
		||||
		return -1, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	used := make([]bool, maxLUN)
 | 
			
		||||
	for _, disk := range disks {
 | 
			
		||||
		if disk.Lun != nil {
 | 
			
		||||
			used[*disk.Lun] = true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	for k, v := range used {
 | 
			
		||||
		if !v {
 | 
			
		||||
			return int32(k), nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return -1, fmt.Errorf("all luns are used")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName.
 | 
			
		||||
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
 | 
			
		||||
	attached := make(map[string]bool)
 | 
			
		||||
	for _, diskName := range diskNames {
 | 
			
		||||
		attached[diskName] = false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// doing stalled read for getNodeDataDisks to ensure we don't call ARM
 | 
			
		||||
	// for every reconcile call. The cache is invalidated after Attach/Detach
 | 
			
		||||
	// disk. So the new entry will be fetched and cached the first time reconcile
 | 
			
		||||
	// loop runs after the Attach/Disk OP which will reflect the latest model.
 | 
			
		||||
	disks, err := c.getNodeDataDisks(nodeName, azcache.CacheReadTypeUnsafe)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		if err == cloudprovider.InstanceNotFound {
 | 
			
		||||
			// if host doesn't exist, no need to detach
 | 
			
		||||
			klog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
 | 
			
		||||
				nodeName, diskNames)
 | 
			
		||||
			return attached, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return attached, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, disk := range disks {
 | 
			
		||||
		for _, diskName := range diskNames {
 | 
			
		||||
			if disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName) {
 | 
			
		||||
				attached[diskName] = true
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return attached, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func filterDetachingDisks(unfilteredDisks []compute.DataDisk) []compute.DataDisk {
 | 
			
		||||
	filteredDisks := []compute.DataDisk{}
 | 
			
		||||
	for _, disk := range unfilteredDisks {
 | 
			
		||||
		if disk.ToBeDetached != nil && *disk.ToBeDetached {
 | 
			
		||||
			if disk.Name != nil {
 | 
			
		||||
				klog.V(2).Infof("Filtering disk: %s with ToBeDetached flag set.", *disk.Name)
 | 
			
		||||
			}
 | 
			
		||||
		} else {
 | 
			
		||||
			filteredDisks = append(filteredDisks, disk)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return filteredDisks
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *controllerCommon) filterNonExistingDisks(ctx context.Context, unfilteredDisks []compute.DataDisk) []compute.DataDisk {
 | 
			
		||||
	filteredDisks := []compute.DataDisk{}
 | 
			
		||||
	for _, disk := range unfilteredDisks {
 | 
			
		||||
		filter := false
 | 
			
		||||
		if disk.ManagedDisk != nil && disk.ManagedDisk.ID != nil {
 | 
			
		||||
			diskURI := *disk.ManagedDisk.ID
 | 
			
		||||
			exist, err := c.cloud.checkDiskExists(ctx, diskURI)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				klog.Errorf("checkDiskExists(%s) failed with error: %v", diskURI, err)
 | 
			
		||||
			} else {
 | 
			
		||||
				// only filter disk when checkDiskExists returns <false, nil>
 | 
			
		||||
				filter = !exist
 | 
			
		||||
				if filter {
 | 
			
		||||
					klog.Errorf("disk(%s) does not exist, removed from data disk list", diskURI)
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if !filter {
 | 
			
		||||
			filteredDisks = append(filteredDisks, disk)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return filteredDisks
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// checkDiskExists reports whether the managed disk behind diskURI still
// exists. An HTTP 404 from the disks API is the "does not exist" answer
// (<false, nil>), not an error; any other API failure is returned as an error.
func (c *controllerCommon) checkDiskExists(ctx context.Context, diskURI string) (bool, error) {
	diskName := path.Base(diskURI)
	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
	if err != nil {
		return false, err
	}

	if _, rerr := c.cloud.DisksClient.Get(ctx, resourceGroup, diskName); rerr != nil {
		if rerr.HTTPStatusCode == http.StatusNotFound {
			// 404 means the disk is gone — a valid answer, not a failure.
			return false, nil
		}
		return false, rerr.Error()
	}

	return true, nil
}
 | 
			
		||||
 | 
			
		||||
func getValidCreationData(subscriptionID, resourceGroup, sourceResourceID, sourceType string) (compute.CreationData, error) {
 | 
			
		||||
	if sourceResourceID == "" {
 | 
			
		||||
		return compute.CreationData{
 | 
			
		||||
			CreateOption: compute.Empty,
 | 
			
		||||
		}, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	switch sourceType {
 | 
			
		||||
	case sourceSnapshot:
 | 
			
		||||
		if match := diskSnapshotPathRE.FindString(sourceResourceID); match == "" {
 | 
			
		||||
			sourceResourceID = fmt.Sprintf(diskSnapshotPath, subscriptionID, resourceGroup, sourceResourceID)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
	case sourceVolume:
 | 
			
		||||
		if match := managedDiskPathRE.FindString(sourceResourceID); match == "" {
 | 
			
		||||
			sourceResourceID = fmt.Sprintf(managedDiskPath, subscriptionID, resourceGroup, sourceResourceID)
 | 
			
		||||
		}
 | 
			
		||||
	default:
 | 
			
		||||
		return compute.CreationData{
 | 
			
		||||
			CreateOption: compute.Empty,
 | 
			
		||||
		}, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	splits := strings.Split(sourceResourceID, "/")
 | 
			
		||||
	if len(splits) > 9 {
 | 
			
		||||
		if sourceType == sourceSnapshot {
 | 
			
		||||
			return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, diskSnapshotPathRE)
 | 
			
		||||
		}
 | 
			
		||||
		return compute.CreationData{}, fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", sourceResourceID, managedDiskPathRE)
 | 
			
		||||
	}
 | 
			
		||||
	return compute.CreationData{
 | 
			
		||||
		CreateOption:     compute.Copy,
 | 
			
		||||
		SourceResourceID: &sourceResourceID,
 | 
			
		||||
	}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func isInstanceNotFoundError(err error) bool {
 | 
			
		||||
	errMsg := strings.ToLower(err.Error())
 | 
			
		||||
	if strings.Contains(errMsg, strings.ToLower(vmssVMNotActiveErrorMessage)) {
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
	return strings.Contains(errMsg, errStatusCode400) && strings.Contains(errMsg, errInvalidParameter) && strings.Contains(errMsg, errTargetInstanceIds)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,824 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/diskclient/mockdiskclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestCommonAttachDisk exercises controllerCommon.AttachDisk with a table of
// scenarios: missing instance, exhausted LUNs, happy path (including caching
// downgrade, encryption set and write accelerator tag), invalid disk URI,
// dangling attachments (good and bad ManagedBy IDs), and a non-matching disk.
func TestCommonAttachDisk(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	maxShare := int32(1)
	goodInstanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", "vm1")
	diskEncryptionSetID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/diskEncryptionSets/%s", "diskEncryptionSet-name")
	testTags := make(map[string]*string)
	testTags[WriteAcceleratorEnabled] = pointer.String("true")
	testCases := []struct {
		desc            string
		vmList          map[string]string
		nodeName        types.NodeName
		isDataDisksFull bool
		isBadDiskURI    bool
		isDiskUsed      bool
		existedDisk     compute.Disk
		expectedLun     int32
		expectedErr     bool
	}{
		{
			desc:        "LUN -1 and error shall be returned if there's no such instance corresponding to given nodeName",
			nodeName:    "vm1",
			existedDisk: compute.Disk{Name: pointer.String("disk-name")},
			expectedLun: -1,
			expectedErr: true,
		},
		{
			desc:            "LUN -1 and error shall be returned if there's no available LUN for instance",
			vmList:          map[string]string{"vm1": "PowerState/Running"},
			nodeName:        "vm1",
			isDataDisksFull: true,
			existedDisk:     compute.Disk{Name: pointer.String("disk-name")},
			expectedLun:     -1,
			expectedErr:     true,
		},
		{
			desc:     "correct LUN and no error shall be returned if everything is good",
			vmList:   map[string]string{"vm1": "PowerState/Running"},
			nodeName: "vm1",
			existedDisk: compute.Disk{Name: pointer.String("disk-name"),
				DiskProperties: &compute.DiskProperties{
					Encryption: &compute.Encryption{DiskEncryptionSetID: &diskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey},
					DiskSizeGB: pointer.Int32(4096),
				},
				Tags: testTags},
			expectedLun: 1,
			expectedErr: false,
		},
		{
			desc:         "an error shall be returned if there's invalid disk uri",
			vmList:       map[string]string{"vm1": "PowerState/Running"},
			nodeName:     "vm1",
			isBadDiskURI: true,
			existedDisk:  compute.Disk{Name: pointer.String("disk-name")},
			expectedLun:  -1,
			expectedErr:  true,
		},
		{
			desc:        "an error shall be returned if attach an already attached disk with good ManagedBy instance id",
			vmList:      map[string]string{"vm1": "PowerState/Running"},
			nodeName:    "vm1",
			existedDisk: compute.Disk{Name: pointer.String("disk-name"), ManagedBy: pointer.String(goodInstanceID), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
			expectedLun: -1,
			expectedErr: true,
		},
		{
			desc:        "an error shall be returned if attach an already attached disk with bad ManagedBy instance id",
			vmList:      map[string]string{"vm1": "PowerState/Running"},
			nodeName:    "vm1",
			existedDisk: compute.Disk{Name: pointer.String("disk-name"), ManagedBy: pointer.String("test"), DiskProperties: &compute.DiskProperties{MaxShares: &maxShare}},
			expectedLun: -1,
			expectedErr: true,
		},
		{
			desc:        "an error shall be returned if there's no matching disk",
			vmList:      map[string]string{"vm1": "PowerState/Running"},
			nodeName:    "vm1",
			existedDisk: compute.Disk{Name: pointer.String("disk-name-1")},
			expectedLun: -1,
			expectedErr: true,
		},
	}

	for i, test := range testCases {
		testCloud := GetTestCloud(ctrl)
		common := &controllerCommon{
			location:              testCloud.Location,
			storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
			resourceGroup:         testCloud.ResourceGroup,
			subscriptionID:        testCloud.SubscriptionID,
			cloud:                 testCloud,
			vmLockMap:             newLockMap(),
		}
		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
			testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
		if test.isBadDiskURI {
			diskURI = fmt.Sprintf("/baduri/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
				testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
		}
		expectedVMs := setTestVirtualMachines(testCloud, test.vmList, test.isDataDisksFull)
		if test.isDiskUsed {
			vm0 := setTestVirtualMachines(testCloud, map[string]string{"vm0": "PowerState/Running"}, test.isDataDisksFull)[0]
			expectedVMs = append(expectedVMs, vm0)
		}
		// Mock the VM client: known VMs are returned as-is; an empty VM list
		// simulates InstanceNotFound.
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
		for _, vm := range expectedVMs {
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
		}
		if len(expectedVMs) == 0 {
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
		}
		mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

		// Mock the disks client: only "disk-name" exists; anything else 404s.
		mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
		mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "disk-name").Return(test.existedDisk, nil).AnyTimes()
		mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not("disk-name")).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()

		lun, err := common.AttachDisk(true, "", diskURI, test.nodeName, compute.CachingTypesReadOnly)
		assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestCommonAttachDiskWithVMSS exercises AttachDisk when the cloud is
// configured with the vmss VM type, covering the vmSet→scaleSet conversion
// failure and the availability-set-membership failure paths.
func TestCommonAttachDiskWithVMSS(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	testCases := []struct {
		desc            string
		vmList          map[string]string
		vmssList        []string
		nodeName        types.NodeName
		isVMSS          bool
		isManagedBy     bool
		isManagedDisk   bool
		isDataDisksFull bool
		existedDisk     compute.Disk
		expectedLun     int32
		expectedErr     bool
	}{
		{
			desc:          "an error shall be returned if convert vmSet to scaleSet failed",
			vmList:        map[string]string{"vm1": "PowerState/Running"},
			nodeName:      "vm1",
			isVMSS:        false,
			isManagedBy:   false,
			isManagedDisk: false,
			existedDisk:   compute.Disk{Name: pointer.String("disk-name")},
			expectedLun:   -1,
			expectedErr:   true,
		},
		{
			desc:          "an error shall be returned if convert vmSet to scaleSet success but node is not managed by availability set",
			vmssList:      []string{"vmss-vm-000001"},
			nodeName:      "vmss1",
			isVMSS:        true,
			isManagedBy:   false,
			isManagedDisk: false,
			existedDisk:   compute.Disk{Name: pointer.String("disk-name")},
			expectedLun:   -1,
			expectedErr:   true,
		},
	}

	for i, test := range testCases {
		testCloud := GetTestCloud(ctrl)
		testCloud.VMType = vmTypeVMSS
		if test.isVMSS {
			if test.isManagedBy {
				// Full VMSS environment: scale set, its VMs, and an empty
				// standalone VM list.
				testCloud.DisableAvailabilitySetNodes = false
				testVMSSName := "vmss"
				expectedVMSS := compute.VirtualMachineScaleSet{Name: pointer.String(testVMSSName)}
				mockVMSSClient := testCloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
				mockVMSSClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup).Return([]compute.VirtualMachineScaleSet{expectedVMSS}, nil).AnyTimes()

				expectedVMSSVMs, _, _ := buildTestVirtualMachineEnv(testCloud, testVMSSName, "", 0, test.vmssList, "", false)
				mockVMSSVMClient := testCloud.VirtualMachineScaleSetVMsClient.(*mockvmssvmclient.MockInterface)
				mockVMSSVMClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup, testVMSSName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()

				mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
				mockVMsClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]compute.VirtualMachine{}, nil).AnyTimes()
			} else {
				testCloud.DisableAvailabilitySetNodes = true
			}
			ss, err := newScaleSet(testCloud)
			assert.NoError(t, err)
			testCloud.VMSet = ss
		}

		common := &controllerCommon{
			location:              testCloud.Location,
			storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
			resourceGroup:         testCloud.ResourceGroup,
			subscriptionID:        testCloud.SubscriptionID,
			cloud:                 testCloud,
			vmLockMap:             newLockMap(),
		}
		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
			testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
		if !test.isVMSS {
			// Non-VMSS cases use standalone VM and disk mocks, mirroring
			// TestCommonAttachDisk's setup.
			expectedVMs := setTestVirtualMachines(testCloud, test.vmList, test.isDataDisksFull)
			mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
			for _, vm := range expectedVMs {
				mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
			}
			if len(expectedVMs) == 0 {
				mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
			}
			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

			mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "disk-name").Return(test.existedDisk, nil).AnyTimes()
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not("disk-name")).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
		}

		lun, err := common.AttachDisk(test.isManagedDisk, "test", diskURI, test.nodeName, compute.CachingTypesReadOnly)
		assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
	}
}
 | 
			
		||||
 | 
			
		||||
func TestCommonDetachDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc             string
 | 
			
		||||
		vmList           map[string]string
 | 
			
		||||
		nodeName         types.NodeName
 | 
			
		||||
		diskName         string
 | 
			
		||||
		isErrorRetriable bool
 | 
			
		||||
		expectedErr      bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "error should not be returned if there's no such instance corresponding to given nodeName",
 | 
			
		||||
			nodeName:    "vm1",
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "an error should be returned if vmset detach failed with isErrorRetriable error",
 | 
			
		||||
			vmList:           map[string]string{"vm1": "PowerState/Running"},
 | 
			
		||||
			nodeName:         "vm1",
 | 
			
		||||
			isErrorRetriable: true,
 | 
			
		||||
			expectedErr:      true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "no error shall be returned if there's no matching disk according to given diskName",
 | 
			
		||||
			vmList:      map[string]string{"vm1": "PowerState/Running"},
 | 
			
		||||
			nodeName:    "vm1",
 | 
			
		||||
			diskName:    "disk2",
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "no error shall be returned if the disk exists",
 | 
			
		||||
			vmList:      map[string]string{"vm1": "PowerState/Running"},
 | 
			
		||||
			nodeName:    "vm1",
 | 
			
		||||
			diskName:    "disk1",
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		common := &controllerCommon{
 | 
			
		||||
			location:              testCloud.Location,
 | 
			
		||||
			storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
 | 
			
		||||
			resourceGroup:         testCloud.ResourceGroup,
 | 
			
		||||
			subscriptionID:        testCloud.SubscriptionID,
 | 
			
		||||
			cloud:                 testCloud,
 | 
			
		||||
			vmLockMap:             newLockMap(),
 | 
			
		||||
		}
 | 
			
		||||
		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/disk-name",
 | 
			
		||||
			testCloud.SubscriptionID, testCloud.ResourceGroup)
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(testCloud, test.vmList, false)
 | 
			
		||||
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		if len(expectedVMs) == 0 {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		if test.isErrorRetriable {
 | 
			
		||||
			testCloud.CloudProviderBackoff = true
 | 
			
		||||
			testCloud.ResourceRequestBackoff = wait.Backoff{Steps: 1}
 | 
			
		||||
			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(&retry.Error{HTTPStatusCode: http.StatusBadRequest, Retriable: true, RawError: fmt.Errorf("Retriable: true")}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		err := common.DetachDisk(test.diskName, diskURI, test.nodeName)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, err: %v", i, test.desc, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetDiskLun(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc        string
 | 
			
		||||
		diskName    string
 | 
			
		||||
		diskURI     string
 | 
			
		||||
		expectedLun int32
 | 
			
		||||
		expectedErr bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "LUN -1 and error shall be returned if diskName != disk.Name or diskURI != disk.Vhd.URI",
 | 
			
		||||
			diskName:    "disk2",
 | 
			
		||||
			expectedLun: -1,
 | 
			
		||||
			expectedErr: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "correct LUN and no error shall be returned if diskName = disk.Name",
 | 
			
		||||
			diskName:    "disk1",
 | 
			
		||||
			expectedLun: 0,
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		common := &controllerCommon{
 | 
			
		||||
			location:              testCloud.Location,
 | 
			
		||||
			storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
 | 
			
		||||
			resourceGroup:         testCloud.ResourceGroup,
 | 
			
		||||
			subscriptionID:        testCloud.SubscriptionID,
 | 
			
		||||
			cloud:                 testCloud,
 | 
			
		||||
			vmLockMap:             newLockMap(),
 | 
			
		||||
		}
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
 | 
			
		||||
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		lun, err := common.GetDiskLun(test.diskName, test.diskURI, "vm1")
 | 
			
		||||
		assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetNextDiskLun(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc            string
 | 
			
		||||
		isDataDisksFull bool
 | 
			
		||||
		expectedLun     int32
 | 
			
		||||
		expectedErr     bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:            "the minimal LUN shall be returned if there's enough room for extra disks",
 | 
			
		||||
			isDataDisksFull: false,
 | 
			
		||||
			expectedLun:     1,
 | 
			
		||||
			expectedErr:     false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:            "LUN -1 and  error shall be returned if there's no available LUN",
 | 
			
		||||
			isDataDisksFull: true,
 | 
			
		||||
			expectedLun:     -1,
 | 
			
		||||
			expectedErr:     true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		common := &controllerCommon{
 | 
			
		||||
			location:              testCloud.Location,
 | 
			
		||||
			storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
 | 
			
		||||
			resourceGroup:         testCloud.ResourceGroup,
 | 
			
		||||
			subscriptionID:        testCloud.SubscriptionID,
 | 
			
		||||
			cloud:                 testCloud,
 | 
			
		||||
			vmLockMap:             newLockMap(),
 | 
			
		||||
		}
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, test.isDataDisksFull)
 | 
			
		||||
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		lun, err := common.GetNextDiskLun("vm1")
 | 
			
		||||
		assert.Equal(t, test.expectedLun, lun, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDisksAreAttached(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc             string
 | 
			
		||||
		diskNames        []string
 | 
			
		||||
		nodeName         types.NodeName
 | 
			
		||||
		expectedAttached map[string]bool
 | 
			
		||||
		expectedErr      bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "an error shall be returned if there's no such instance corresponding to given nodeName",
 | 
			
		||||
			diskNames:        []string{"disk1"},
 | 
			
		||||
			nodeName:         "vm2",
 | 
			
		||||
			expectedAttached: map[string]bool{"disk1": false},
 | 
			
		||||
			expectedErr:      false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "proper attach map shall be returned if everything is good",
 | 
			
		||||
			diskNames:        []string{"disk1", "disk2"},
 | 
			
		||||
			nodeName:         "vm1",
 | 
			
		||||
			expectedAttached: map[string]bool{"disk1": true, "disk2": false},
 | 
			
		||||
			expectedErr:      false,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		common := &controllerCommon{
 | 
			
		||||
			location:              testCloud.Location,
 | 
			
		||||
			storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
 | 
			
		||||
			resourceGroup:         testCloud.ResourceGroup,
 | 
			
		||||
			subscriptionID:        testCloud.SubscriptionID,
 | 
			
		||||
			cloud:                 testCloud,
 | 
			
		||||
			vmLockMap:             newLockMap(),
 | 
			
		||||
		}
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
 | 
			
		||||
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "vm2", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		attached, err := common.DisksAreAttached(test.diskNames, test.nodeName)
 | 
			
		||||
		assert.Equal(t, test.expectedAttached, attached, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestFilteredDetachingDisks(t *testing.T) {
 | 
			
		||||
 | 
			
		||||
	disks := []compute.DataDisk{
 | 
			
		||||
		{
 | 
			
		||||
			Name:         pointer.StringPtr("DiskName1"),
 | 
			
		||||
			ToBeDetached: pointer.BoolPtr(false),
 | 
			
		||||
			ManagedDisk: &compute.ManagedDiskParameters{
 | 
			
		||||
				ID: pointer.StringPtr("ManagedID"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name:         pointer.StringPtr("DiskName2"),
 | 
			
		||||
			ToBeDetached: pointer.BoolPtr(true),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name:         pointer.StringPtr("DiskName3"),
 | 
			
		||||
			ToBeDetached: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name:         pointer.StringPtr("DiskName4"),
 | 
			
		||||
			ToBeDetached: nil,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	filteredDisks := filterDetachingDisks(disks)
 | 
			
		||||
	assert.Equal(t, 3, len(filteredDisks))
 | 
			
		||||
	assert.Equal(t, "DiskName1", *filteredDisks[0].Name)
 | 
			
		||||
	assert.Equal(t, "ManagedID", *filteredDisks[0].ManagedDisk.ID)
 | 
			
		||||
	assert.Equal(t, "DiskName3", *filteredDisks[1].Name)
 | 
			
		||||
 | 
			
		||||
	disks = []compute.DataDisk{}
 | 
			
		||||
	filteredDisks = filterDetachingDisks(disks)
 | 
			
		||||
	assert.Equal(t, 0, len(filteredDisks))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetValidCreationData(t *testing.T) {
 | 
			
		||||
	sourceResourceSnapshotID := "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx"
 | 
			
		||||
	sourceResourceVolumeID := "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xxx"
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		subscriptionID   string
 | 
			
		||||
		resourceGroup    string
 | 
			
		||||
		sourceResourceID string
 | 
			
		||||
		sourceType       string
 | 
			
		||||
		expected1        compute.CreationData
 | 
			
		||||
		expected2        error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "",
 | 
			
		||||
			sourceType:       "",
 | 
			
		||||
			expected1: compute.CreationData{
 | 
			
		||||
				CreateOption: compute.Empty,
 | 
			
		||||
			},
 | 
			
		||||
			expected2: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx",
 | 
			
		||||
			sourceType:       sourceSnapshot,
 | 
			
		||||
			expected1: compute.CreationData{
 | 
			
		||||
				CreateOption:     compute.Copy,
 | 
			
		||||
				SourceResourceID: &sourceResourceSnapshotID,
 | 
			
		||||
			},
 | 
			
		||||
			expected2: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "xxx",
 | 
			
		||||
			resourceGroup:    "xxx",
 | 
			
		||||
			sourceResourceID: "xxx",
 | 
			
		||||
			sourceType:       sourceSnapshot,
 | 
			
		||||
			expected1: compute.CreationData{
 | 
			
		||||
				CreateOption:     compute.Copy,
 | 
			
		||||
				SourceResourceID: &sourceResourceSnapshotID,
 | 
			
		||||
			},
 | 
			
		||||
			expected2: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "/subscriptions/23/providers/Microsoft.Compute/disks/name",
 | 
			
		||||
			sourceType:       sourceSnapshot,
 | 
			
		||||
			expected1:        compute.CreationData{},
 | 
			
		||||
			expected2:        fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/23/providers/Microsoft.Compute/disks/name", diskSnapshotPathRE),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "http://test.com/vhds/name",
 | 
			
		||||
			sourceType:       sourceSnapshot,
 | 
			
		||||
			expected1:        compute.CreationData{},
 | 
			
		||||
			expected2:        fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots/http://test.com/vhds/name", diskSnapshotPathRE),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "/subscriptions/xxx/snapshots/xxx",
 | 
			
		||||
			sourceType:       sourceSnapshot,
 | 
			
		||||
			expected1:        compute.CreationData{},
 | 
			
		||||
			expected2:        fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/snapshots//subscriptions/xxx/snapshots/xxx", diskSnapshotPathRE),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx",
 | 
			
		||||
			sourceType:       sourceSnapshot,
 | 
			
		||||
			expected1:        compute.CreationData{},
 | 
			
		||||
			expected2:        fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx/snapshots/xxx/snapshots/xxx", diskSnapshotPathRE),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "xxx",
 | 
			
		||||
			sourceType:       "",
 | 
			
		||||
			expected1: compute.CreationData{
 | 
			
		||||
				CreateOption: compute.Empty,
 | 
			
		||||
			},
 | 
			
		||||
			expected2: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/xxx",
 | 
			
		||||
			sourceType:       sourceVolume,
 | 
			
		||||
			expected1: compute.CreationData{
 | 
			
		||||
				CreateOption:     compute.Copy,
 | 
			
		||||
				SourceResourceID: &sourceResourceVolumeID,
 | 
			
		||||
			},
 | 
			
		||||
			expected2: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "xxx",
 | 
			
		||||
			resourceGroup:    "xxx",
 | 
			
		||||
			sourceResourceID: "xxx",
 | 
			
		||||
			sourceType:       sourceVolume,
 | 
			
		||||
			expected1: compute.CreationData{
 | 
			
		||||
				CreateOption:     compute.Copy,
 | 
			
		||||
				SourceResourceID: &sourceResourceVolumeID,
 | 
			
		||||
			},
 | 
			
		||||
			expected2: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			subscriptionID:   "",
 | 
			
		||||
			resourceGroup:    "",
 | 
			
		||||
			sourceResourceID: "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx",
 | 
			
		||||
			sourceType:       sourceVolume,
 | 
			
		||||
			expected1:        compute.CreationData{},
 | 
			
		||||
			expected2:        fmt.Errorf("sourceResourceID(%s) is invalid, correct format: %s", "/subscriptions//resourceGroups//providers/Microsoft.Compute/disks//subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/snapshots/xxx", managedDiskPathRE),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		result, err := getValidCreationData(test.subscriptionID, test.resourceGroup, test.sourceResourceID, test.sourceType)
 | 
			
		||||
		if !reflect.DeepEqual(result, test.expected1) || !reflect.DeepEqual(err, test.expected2) {
 | 
			
		||||
			t.Errorf("input sourceResourceID: %v, sourceType: %v, getValidCreationData result: %v, expected1 : %v, err: %v, expected2: %v", test.sourceResourceID, test.sourceType, result, test.expected1, err, test.expected2)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCheckDiskExists(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	testCloud := GetTestCloud(ctrl)
 | 
			
		||||
	common := &controllerCommon{
 | 
			
		||||
		location:              testCloud.Location,
 | 
			
		||||
		storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
 | 
			
		||||
		resourceGroup:         testCloud.ResourceGroup,
 | 
			
		||||
		subscriptionID:        testCloud.SubscriptionID,
 | 
			
		||||
		cloud:                 testCloud,
 | 
			
		||||
		vmLockMap:             newLockMap(),
 | 
			
		||||
	}
 | 
			
		||||
	// create a new disk before running test
 | 
			
		||||
	newDiskName := "newdisk"
 | 
			
		||||
	newDiskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
 | 
			
		||||
		testCloud.SubscriptionID, testCloud.ResourceGroup, newDiskName)
 | 
			
		||||
 | 
			
		||||
	mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
	mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, newDiskName).Return(compute.Disk{}, nil).AnyTimes()
 | 
			
		||||
	mockDisksClient.EXPECT().Get(gomock.Any(), gomock.Not(testCloud.ResourceGroup), gomock.Any()).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		diskURI        string
 | 
			
		||||
		expectedResult bool
 | 
			
		||||
		expectedErr    bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			diskURI:        "incorrect disk URI format",
 | 
			
		||||
			expectedResult: false,
 | 
			
		||||
			expectedErr:    true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			diskURI:        "/subscriptions/xxx/resourceGroups/xxx/providers/Microsoft.Compute/disks/non-existing-disk",
 | 
			
		||||
			expectedResult: false,
 | 
			
		||||
			expectedErr:    false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			diskURI:        newDiskURI,
 | 
			
		||||
			expectedResult: true,
 | 
			
		||||
			expectedErr:    false,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		exist, err := common.checkDiskExists(ctx, test.diskURI)
 | 
			
		||||
		assert.Equal(t, test.expectedResult, exist, "TestCase[%d]", i, exist)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d], return error: %v", i, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestFilterNonExistingDisks(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	testCloud := GetTestCloud(ctrl)
 | 
			
		||||
	common := &controllerCommon{
 | 
			
		||||
		location:              testCloud.Location,
 | 
			
		||||
		storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
 | 
			
		||||
		resourceGroup:         testCloud.ResourceGroup,
 | 
			
		||||
		subscriptionID:        testCloud.SubscriptionID,
 | 
			
		||||
		cloud:                 testCloud,
 | 
			
		||||
		vmLockMap:             newLockMap(),
 | 
			
		||||
	}
 | 
			
		||||
	// create a new disk before running test
 | 
			
		||||
	diskURIPrefix := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/",
 | 
			
		||||
		testCloud.SubscriptionID, testCloud.ResourceGroup)
 | 
			
		||||
	newDiskName := "newdisk"
 | 
			
		||||
	newDiskURI := diskURIPrefix + newDiskName
 | 
			
		||||
 | 
			
		||||
	mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
	mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, newDiskName).Return(compute.Disk{}, nil).AnyTimes()
 | 
			
		||||
	mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not(newDiskName)).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
 | 
			
		||||
	disks := []compute.DataDisk{
 | 
			
		||||
		{
 | 
			
		||||
			Name: &newDiskName,
 | 
			
		||||
			ManagedDisk: &compute.ManagedDiskParameters{
 | 
			
		||||
				ID: &newDiskURI,
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.StringPtr("DiskName2"),
 | 
			
		||||
			ManagedDisk: &compute.ManagedDiskParameters{
 | 
			
		||||
				ID: pointer.StringPtr(diskURIPrefix + "DiskName2"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.StringPtr("DiskName3"),
 | 
			
		||||
			ManagedDisk: &compute.ManagedDiskParameters{
 | 
			
		||||
				ID: pointer.StringPtr(diskURIPrefix + "DiskName3"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Name: pointer.StringPtr("DiskName4"),
 | 
			
		||||
			ManagedDisk: &compute.ManagedDiskParameters{
 | 
			
		||||
				ID: pointer.StringPtr(diskURIPrefix + "DiskName4"),
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	filteredDisks := common.filterNonExistingDisks(ctx, disks)
 | 
			
		||||
	assert.Equal(t, 1, len(filteredDisks))
 | 
			
		||||
	assert.Equal(t, newDiskName, *filteredDisks[0].Name)
 | 
			
		||||
 | 
			
		||||
	disks = []compute.DataDisk{}
 | 
			
		||||
	filteredDisks = filterDetachingDisks(disks)
 | 
			
		||||
	assert.Equal(t, 0, len(filteredDisks))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestFilterNonExistingDisksWithSpecialHTTPStatusCode(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	testCloud := GetTestCloud(ctrl)
 | 
			
		||||
	common := &controllerCommon{
 | 
			
		||||
		location:              testCloud.Location,
 | 
			
		||||
		storageEndpointSuffix: testCloud.Environment.StorageEndpointSuffix,
 | 
			
		||||
		resourceGroup:         testCloud.ResourceGroup,
 | 
			
		||||
		subscriptionID:        testCloud.SubscriptionID,
 | 
			
		||||
		cloud:                 testCloud,
 | 
			
		||||
		vmLockMap:             newLockMap(),
 | 
			
		||||
	}
 | 
			
		||||
	// create a new disk before running test
 | 
			
		||||
	diskURIPrefix := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/",
 | 
			
		||||
		testCloud.SubscriptionID, testCloud.ResourceGroup)
 | 
			
		||||
	newDiskName := "specialdisk"
 | 
			
		||||
	newDiskURI := diskURIPrefix + newDiskName
 | 
			
		||||
 | 
			
		||||
	mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
	mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Eq(newDiskName)).Return(compute.Disk{}, &retry.Error{HTTPStatusCode: http.StatusBadRequest, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
 | 
			
		||||
	disks := []compute.DataDisk{
 | 
			
		||||
		{
 | 
			
		||||
			Name: &newDiskName,
 | 
			
		||||
			ManagedDisk: &compute.ManagedDiskParameters{
 | 
			
		||||
				ID: &newDiskURI,
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	filteredDisks := common.filterNonExistingDisks(ctx, disks)
 | 
			
		||||
	assert.Equal(t, 1, len(filteredDisks))
 | 
			
		||||
	assert.Equal(t, newDiskName, *filteredDisks[0].Name)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestIsInstanceNotFoundError(t *testing.T) {
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		errMsg         string
 | 
			
		||||
		expectedResult bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			errMsg:         "",
 | 
			
		||||
			expectedResult: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			errMsg:         "other error",
 | 
			
		||||
			expectedResult: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			errMsg:         "The provided instanceId 857 is not an active Virtual Machine Scale Set VM instanceId.",
 | 
			
		||||
			expectedResult: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			errMsg:         `compute.VirtualMachineScaleSetVMsClient#Update: Failure sending request: StatusCode=400 -- Original Error: Code="InvalidParameter" Message="The provided instanceId 1181 is not an active Virtual Machine Scale Set VM instanceId." Target="instanceIds"`,
 | 
			
		||||
			expectedResult: true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		result := isInstanceNotFoundError(fmt.Errorf(test.errMsg))
 | 
			
		||||
		assert.Equal(t, test.expectedResult, result, "TestCase[%d]", i, result)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -1,204 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// AttachDisk attaches a vhd to vm
 | 
			
		||||
// the vhd must exist, can be identified by diskName, diskURI, and lun.
 | 
			
		||||
func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error {
 | 
			
		||||
	vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vmName := mapNodeNameToVMName(nodeName)
 | 
			
		||||
	nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
 | 
			
		||||
	copy(disks, *vm.StorageProfile.DataDisks)
 | 
			
		||||
 | 
			
		||||
	if isManagedDisk {
 | 
			
		||||
		managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
 | 
			
		||||
		if diskEncryptionSetID == "" {
 | 
			
		||||
			if vm.StorageProfile.OsDisk != nil &&
 | 
			
		||||
				vm.StorageProfile.OsDisk.ManagedDisk != nil &&
 | 
			
		||||
				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
 | 
			
		||||
				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
 | 
			
		||||
				// set diskEncryptionSet as value of os disk by default
 | 
			
		||||
				diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		if diskEncryptionSetID != "" {
 | 
			
		||||
			managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID}
 | 
			
		||||
		}
 | 
			
		||||
		disks = append(disks,
 | 
			
		||||
			compute.DataDisk{
 | 
			
		||||
				Name:                    &diskName,
 | 
			
		||||
				Lun:                     &lun,
 | 
			
		||||
				Caching:                 cachingMode,
 | 
			
		||||
				CreateOption:            "attach",
 | 
			
		||||
				ManagedDisk:             managedDisk,
 | 
			
		||||
				WriteAcceleratorEnabled: pointer.Bool(writeAcceleratorEnabled),
 | 
			
		||||
			})
 | 
			
		||||
	} else {
 | 
			
		||||
		disks = append(disks,
 | 
			
		||||
			compute.DataDisk{
 | 
			
		||||
				Name: &diskName,
 | 
			
		||||
				Vhd: &compute.VirtualHardDisk{
 | 
			
		||||
					URI: &diskURI,
 | 
			
		||||
				},
 | 
			
		||||
				Lun:          &lun,
 | 
			
		||||
				Caching:      cachingMode,
 | 
			
		||||
				CreateOption: "attach",
 | 
			
		||||
			})
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	newVM := compute.VirtualMachineUpdate{
 | 
			
		||||
		VirtualMachineProperties: &compute.VirtualMachineProperties{
 | 
			
		||||
			StorageProfile: &compute.StorageProfile{
 | 
			
		||||
				DataDisks: &disks,
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s) with DiskEncryptionSetID(%s)", nodeResourceGroup, vmName, diskName, diskURI, diskEncryptionSetID)
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	// Invalidate the cache right after updating
 | 
			
		||||
	defer as.cloud.vmCache.Delete(vmName)
 | 
			
		||||
 | 
			
		||||
	rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.Errorf("azureDisk - attach disk(%s, %s) on rg(%s) vm(%s) failed, err: %v", diskName, diskURI, nodeResourceGroup, vmName, rerr)
 | 
			
		||||
		if rerr.HTTPStatusCode == http.StatusNotFound {
 | 
			
		||||
			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%s, %s) on rg(%s) vm(%s)", diskName, diskURI, nodeResourceGroup, vmName)
 | 
			
		||||
			disks := as.filterNonExistingDisks(ctx, *newVM.VirtualMachineProperties.StorageProfile.DataDisks)
 | 
			
		||||
			newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks
 | 
			
		||||
			rerr = as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s) returned with %v", nodeResourceGroup, vmName, diskName, diskURI, rerr)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DetachDisk detaches a disk from host
 | 
			
		||||
// the vhd can be identified by diskName or diskURI
 | 
			
		||||
func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
 | 
			
		||||
	vm, err := as.getVirtualMachine(nodeName, azcache.CacheReadTypeDefault)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		// if host doesn't exist, no need to detach
 | 
			
		||||
		klog.Warningf("azureDisk - cannot find node %s, skip detaching disk(%s, %s)", nodeName, diskName, diskURI)
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	vmName := mapNodeNameToVMName(nodeName)
 | 
			
		||||
	nodeResourceGroup, err := as.GetNodeResourceGroup(vmName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	disks := make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
 | 
			
		||||
	copy(disks, *vm.StorageProfile.DataDisks)
 | 
			
		||||
 | 
			
		||||
	bFoundDisk := false
 | 
			
		||||
	for i, disk := range disks {
 | 
			
		||||
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
 | 
			
		||||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
 | 
			
		||||
			(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
 | 
			
		||||
			// found the disk
 | 
			
		||||
			klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
 | 
			
		||||
			if strings.EqualFold(as.cloud.Environment.Name, AzureStackCloudName) {
 | 
			
		||||
				disks = append(disks[:i], disks[i+1:]...)
 | 
			
		||||
			} else {
 | 
			
		||||
				disks[i].ToBeDetached = pointer.Bool(true)
 | 
			
		||||
			}
 | 
			
		||||
			bFoundDisk = true
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if !bFoundDisk {
 | 
			
		||||
		// only log here, next action is to update VM status with original meta data
 | 
			
		||||
		klog.Errorf("detach azure disk: disk %s not found, diskURI: %s", diskName, diskURI)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	newVM := compute.VirtualMachineUpdate{
 | 
			
		||||
		VirtualMachineProperties: &compute.VirtualMachineProperties{
 | 
			
		||||
			StorageProfile: &compute.StorageProfile{
 | 
			
		||||
				DataDisks: &disks,
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, vmName, diskName, diskURI)
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	// Invalidate the cache right after updating
 | 
			
		||||
	defer as.cloud.vmCache.Delete(vmName)
 | 
			
		||||
 | 
			
		||||
	rerr := as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.Errorf("azureDisk - detach disk(%s, %s) on rg(%s) vm(%s) failed, err: %v", diskName, diskURI, nodeResourceGroup, vmName, rerr)
 | 
			
		||||
		if rerr.HTTPStatusCode == http.StatusNotFound {
 | 
			
		||||
			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%s, %s) on rg(%s) vm(%s)", diskName, diskURI, nodeResourceGroup, vmName)
 | 
			
		||||
			disks := as.filterNonExistingDisks(ctx, *vm.StorageProfile.DataDisks)
 | 
			
		||||
			newVM.VirtualMachineProperties.StorageProfile.DataDisks = &disks
 | 
			
		||||
			rerr = as.VirtualMachinesClient.Update(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s) returned with %v", nodeResourceGroup, vmName, diskName, diskURI, rerr)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetDataDisks gets a list of data disks attached to the node.
 | 
			
		||||
func (as *availabilitySet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, error) {
 | 
			
		||||
	vm, err := as.getVirtualMachine(nodeName, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if vm.StorageProfile.DataDisks == nil {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return *vm.StorageProfile.DataDisks, nil
 | 
			
		||||
}
 | 
			
		||||
@@ -1,251 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// fakeCacheTTL is the short cache expiry used by tests that exercise
// cache-refresh behavior (azcache.CacheReadTypeUnsafe).
var fakeCacheTTL = 2 * time.Second
 | 
			
		||||
 | 
			
		||||
func TestStandardAttachDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc          string
 | 
			
		||||
		nodeName      types.NodeName
 | 
			
		||||
		isManagedDisk bool
 | 
			
		||||
		isAttachFail  bool
 | 
			
		||||
		expectedErr   bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "an error shall be returned if there's no corresponding vms",
 | 
			
		||||
			nodeName:      "vm2",
 | 
			
		||||
			isManagedDisk: true,
 | 
			
		||||
			expectedErr:   true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "no error shall be returned if everything's good",
 | 
			
		||||
			nodeName:      "vm1",
 | 
			
		||||
			isManagedDisk: true,
 | 
			
		||||
			expectedErr:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "no error shall be returned if everything's good with non managed disk",
 | 
			
		||||
			nodeName:      "vm1",
 | 
			
		||||
			isManagedDisk: false,
 | 
			
		||||
			expectedErr:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "an error shall be returned if update attach disk failed",
 | 
			
		||||
			nodeName:      "vm1",
 | 
			
		||||
			isManagedDisk: true,
 | 
			
		||||
			isAttachFail:  true,
 | 
			
		||||
			expectedErr:   true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		vmSet := testCloud.VMSet
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
 | 
			
		||||
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			vm.StorageProfile = &compute.StorageProfile{
 | 
			
		||||
				OsDisk: &compute.OSDisk{
 | 
			
		||||
					Name: pointer.String("osdisk1"),
 | 
			
		||||
					ManagedDisk: &compute.ManagedDiskParameters{
 | 
			
		||||
						ID: pointer.String("ManagedID"),
 | 
			
		||||
						DiskEncryptionSet: &compute.DiskEncryptionSetParameters{
 | 
			
		||||
							ID: pointer.String("DiskEncryptionSetID"),
 | 
			
		||||
						},
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
				DataDisks: &[]compute.DataDisk{},
 | 
			
		||||
			}
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "vm2", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
		if test.isAttachFail {
 | 
			
		||||
			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(&retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		err := vmSet.AttachDisk(test.isManagedDisk, "",
 | 
			
		||||
			"uri", test.nodeName, 0, compute.CachingTypesReadOnly, "", false)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, err: %v", i, test.desc, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestStandardDetachDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc          string
 | 
			
		||||
		nodeName      types.NodeName
 | 
			
		||||
		diskName      string
 | 
			
		||||
		isDetachFail  bool
 | 
			
		||||
		expectedError bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "no error shall be returned if there's no corresponding vm",
 | 
			
		||||
			nodeName:      "vm2",
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "no error shall be returned if there's no corresponding disk",
 | 
			
		||||
			nodeName:      "vm1",
 | 
			
		||||
			diskName:      "disk2",
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "no error shall be returned if there's a corresponding disk",
 | 
			
		||||
			nodeName:      "vm1",
 | 
			
		||||
			diskName:      "disk1",
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:          "an error shall be returned if detach disk failed",
 | 
			
		||||
			nodeName:      "vm1",
 | 
			
		||||
			isDetachFail:  true,
 | 
			
		||||
			expectedError: true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		vmSet := testCloud.VMSet
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
 | 
			
		||||
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, "vm2", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
		if test.isDetachFail {
 | 
			
		||||
			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(&retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		err := vmSet.DetachDisk(test.diskName, "", test.nodeName)
 | 
			
		||||
		assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
		if !test.expectedError && test.diskName != "" {
 | 
			
		||||
			dataDisks, err := vmSet.GetDataDisks(test.nodeName, azcache.CacheReadTypeDefault)
 | 
			
		||||
			assert.Equal(t, true, len(dataDisks) == 1, "TestCase[%d]: %s, err: %v", i, test.desc, err)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetDataDisks(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	var testCases = []struct {
 | 
			
		||||
		desc              string
 | 
			
		||||
		nodeName          types.NodeName
 | 
			
		||||
		isDataDiskNull    bool
 | 
			
		||||
		expectedDataDisks []compute.DataDisk
 | 
			
		||||
		expectedError     bool
 | 
			
		||||
		crt               azcache.AzureCacheReadType
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:              "an error shall be returned if there's no corresponding vm",
 | 
			
		||||
			nodeName:          "vm2",
 | 
			
		||||
			expectedDataDisks: nil,
 | 
			
		||||
			expectedError:     true,
 | 
			
		||||
			crt:               azcache.CacheReadTypeDefault,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "correct list of data disks shall be returned if everything is good",
 | 
			
		||||
			nodeName: "vm1",
 | 
			
		||||
			expectedDataDisks: []compute.DataDisk{
 | 
			
		||||
				{
 | 
			
		||||
					Lun:  pointer.Int32(0),
 | 
			
		||||
					Name: pointer.String("disk1"),
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
			crt:           azcache.CacheReadTypeDefault,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "correct list of data disks shall be returned if everything is good",
 | 
			
		||||
			nodeName: "vm1",
 | 
			
		||||
			expectedDataDisks: []compute.DataDisk{
 | 
			
		||||
				{
 | 
			
		||||
					Lun:  pointer.Int32(0),
 | 
			
		||||
					Name: pointer.String("disk1"),
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
			crt:           azcache.CacheReadTypeUnsafe,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:              "nil shall be returned if DataDisk is null",
 | 
			
		||||
			nodeName:          "vm1",
 | 
			
		||||
			isDataDiskNull:    true,
 | 
			
		||||
			expectedDataDisks: nil,
 | 
			
		||||
			expectedError:     false,
 | 
			
		||||
			crt:               azcache.CacheReadTypeDefault,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		vmSet := testCloud.VMSet
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false)
 | 
			
		||||
		mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			if test.isDataDiskNull {
 | 
			
		||||
				vm.StorageProfile = &compute.StorageProfile{}
 | 
			
		||||
			}
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockVMsClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, gomock.Not("vm1"), gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
		mockVMsClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		dataDisks, err := vmSet.GetDataDisks(test.nodeName, test.crt)
 | 
			
		||||
		assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
		assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
 | 
			
		||||
		if test.crt == azcache.CacheReadTypeUnsafe {
 | 
			
		||||
			time.Sleep(fakeCacheTTL)
 | 
			
		||||
			dataDisks, err := vmSet.GetDataDisks(test.nodeName, test.crt)
 | 
			
		||||
			assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
			assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -1,207 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// AttachDisk attaches a vhd to vm
 | 
			
		||||
// the vhd must exist, can be identified by diskName, diskURI, and lun.
 | 
			
		||||
func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error {
 | 
			
		||||
	vmName := mapNodeNameToVMName(nodeName)
 | 
			
		||||
	ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	disks := []compute.DataDisk{}
 | 
			
		||||
	if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
 | 
			
		||||
		disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
 | 
			
		||||
		copy(disks, *vm.StorageProfile.DataDisks)
 | 
			
		||||
	}
 | 
			
		||||
	if isManagedDisk {
 | 
			
		||||
		managedDisk := &compute.ManagedDiskParameters{ID: &diskURI}
 | 
			
		||||
		if diskEncryptionSetID == "" {
 | 
			
		||||
			if vm.StorageProfile.OsDisk != nil &&
 | 
			
		||||
				vm.StorageProfile.OsDisk.ManagedDisk != nil &&
 | 
			
		||||
				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet != nil &&
 | 
			
		||||
				vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID != nil {
 | 
			
		||||
				// set diskEncryptionSet as value of os disk by default
 | 
			
		||||
				diskEncryptionSetID = *vm.StorageProfile.OsDisk.ManagedDisk.DiskEncryptionSet.ID
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		if diskEncryptionSetID != "" {
 | 
			
		||||
			managedDisk.DiskEncryptionSet = &compute.DiskEncryptionSetParameters{ID: &diskEncryptionSetID}
 | 
			
		||||
		}
 | 
			
		||||
		disks = append(disks,
 | 
			
		||||
			compute.DataDisk{
 | 
			
		||||
				Name:                    &diskName,
 | 
			
		||||
				Lun:                     &lun,
 | 
			
		||||
				Caching:                 compute.CachingTypes(cachingMode),
 | 
			
		||||
				CreateOption:            "attach",
 | 
			
		||||
				ManagedDisk:             managedDisk,
 | 
			
		||||
				WriteAcceleratorEnabled: pointer.Bool(writeAcceleratorEnabled),
 | 
			
		||||
			})
 | 
			
		||||
	} else {
 | 
			
		||||
		disks = append(disks,
 | 
			
		||||
			compute.DataDisk{
 | 
			
		||||
				Name: &diskName,
 | 
			
		||||
				Vhd: &compute.VirtualHardDisk{
 | 
			
		||||
					URI: &diskURI,
 | 
			
		||||
				},
 | 
			
		||||
				Lun:          &lun,
 | 
			
		||||
				Caching:      compute.CachingTypes(cachingMode),
 | 
			
		||||
				CreateOption: "attach",
 | 
			
		||||
			})
 | 
			
		||||
	}
 | 
			
		||||
	newVM := compute.VirtualMachineScaleSetVM{
 | 
			
		||||
		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
 | 
			
		||||
			StorageProfile: &compute.StorageProfile{
 | 
			
		||||
				DataDisks: &disks,
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	// Invalidate the cache right after updating
 | 
			
		||||
	defer ss.deleteCacheForNode(vmName)
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s) with DiskEncryptionSetID(%s)", nodeResourceGroup, nodeName, diskName, diskURI, diskEncryptionSetID)
 | 
			
		||||
	rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.Errorf("azureDisk - attach disk(%s, %s) on rg(%s) vm(%s) failed, err: %v", diskName, diskURI, nodeResourceGroup, nodeName, rerr)
 | 
			
		||||
		if rerr.HTTPStatusCode == http.StatusNotFound {
 | 
			
		||||
			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%s, %s) on rg(%s) vm(%s)", diskName, diskURI, nodeResourceGroup, nodeName)
 | 
			
		||||
			disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks)
 | 
			
		||||
			newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks
 | 
			
		||||
			rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)  returned with %v", nodeResourceGroup, nodeName, diskName, diskURI, rerr)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DetachDisk detaches a disk from host
 | 
			
		||||
// the vhd can be identified by diskName or diskURI
 | 
			
		||||
func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error {
 | 
			
		||||
	vmName := mapNodeNameToVMName(nodeName)
 | 
			
		||||
	ssName, instanceID, vm, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	nodeResourceGroup, err := ss.GetNodeResourceGroup(vmName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	disks := []compute.DataDisk{}
 | 
			
		||||
	if vm.StorageProfile != nil && vm.StorageProfile.DataDisks != nil {
 | 
			
		||||
		disks = make([]compute.DataDisk, len(*vm.StorageProfile.DataDisks))
 | 
			
		||||
		copy(disks, *vm.StorageProfile.DataDisks)
 | 
			
		||||
	}
 | 
			
		||||
	bFoundDisk := false
 | 
			
		||||
	for i, disk := range disks {
 | 
			
		||||
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && strings.EqualFold(*disk.Name, diskName)) ||
 | 
			
		||||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) ||
 | 
			
		||||
			(disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) {
 | 
			
		||||
			// found the disk
 | 
			
		||||
			klog.V(2).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
 | 
			
		||||
			if strings.EqualFold(ss.cloud.Environment.Name, AzureStackCloudName) {
 | 
			
		||||
				disks = append(disks[:i], disks[i+1:]...)
 | 
			
		||||
			} else {
 | 
			
		||||
				disks[i].ToBeDetached = pointer.Bool(true)
 | 
			
		||||
			}
 | 
			
		||||
			bFoundDisk = true
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if !bFoundDisk {
 | 
			
		||||
		// only log here, next action is to update VM status with original meta data
 | 
			
		||||
		klog.Errorf("detach azure disk: disk %s not found, diskURI: %s", diskName, diskURI)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	newVM := compute.VirtualMachineScaleSetVM{
 | 
			
		||||
		VirtualMachineScaleSetVMProperties: &compute.VirtualMachineScaleSetVMProperties{
 | 
			
		||||
			StorageProfile: &compute.StorageProfile{
 | 
			
		||||
				DataDisks: &disks,
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	// Invalidate the cache right after updating
 | 
			
		||||
	defer ss.deleteCacheForNode(vmName)
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
 | 
			
		||||
	rerr := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.Errorf("azureDisk - detach disk(%s, %s) on rg(%s) vm(%s) failed, err: %v", diskName, diskURI, nodeResourceGroup, nodeName, rerr)
 | 
			
		||||
		if rerr.HTTPStatusCode == http.StatusNotFound {
 | 
			
		||||
			klog.Errorf("azureDisk - begin to filterNonExistingDisks(%s, %s) on rg(%s) vm(%s)", diskName, diskURI, nodeResourceGroup, nodeName)
 | 
			
		||||
			disks := ss.filterNonExistingDisks(ctx, *newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks)
 | 
			
		||||
			newVM.VirtualMachineScaleSetVMProperties.StorageProfile.DataDisks = &disks
 | 
			
		||||
			rerr = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s) returned with %v", nodeResourceGroup, nodeName, diskName, diskURI, rerr)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetDataDisks gets a list of data disks attached to the node.
 | 
			
		||||
func (ss *scaleSet) GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, error) {
 | 
			
		||||
	_, _, vm, err := ss.getVmssVM(string(nodeName), crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if vm.StorageProfile == nil || vm.StorageProfile.DataDisks == nil {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return *vm.StorageProfile.DataDisks, nil
 | 
			
		||||
}
 | 
			
		||||
@@ -1,326 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestAttachDiskWithVMSS verifies scaleSet.AttachDisk for managed and
// non-managed disks, plus two error paths: a node name that is not a VMSS
// instance, and a 404 (StatusNotFound) returned by the VMSS VM Update call.
func TestAttachDiskWithVMSS(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Using this scale-set name makes the mocked Update call return a 404.
	fakeStatusNotFoundVMSSName := types.NodeName("FakeStatusNotFoundVMSSName")
	testCases := []struct {
		desc           string
		vmList         map[string]string
		vmssVMList     []string
		vmssName       types.NodeName
		vmssvmName     types.NodeName
		isManagedDisk  bool
		existedDisk    compute.Disk
		expectedErr    bool
		expectedErrMsg error
	}{
		{
			desc:           "an error shall be returned if it is invalid vmss name",
			vmssVMList:     []string{"vmss-vm-000001"},
			vmssName:       "vm1",
			vmssvmName:     "vm1",
			isManagedDisk:  false,
			existedDisk:    compute.Disk{Name: pointer.String("disk-name")},
			expectedErr:    true,
			expectedErrMsg: fmt.Errorf("not a vmss instance"),
		},
		{
			desc:          "no error shall be returned if everything is good with managed disk",
			vmssVMList:    []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
			vmssName:      "vmss00",
			vmssvmName:    "vmss00-vm-000000",
			isManagedDisk: true,
			existedDisk:   compute.Disk{Name: pointer.String("disk-name")},
			expectedErr:   false,
		},
		{
			desc:          "no error shall be returned if everything is good with non-managed disk",
			vmssVMList:    []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
			vmssName:      "vmss00",
			vmssvmName:    "vmss00-vm-000000",
			isManagedDisk: false,
			existedDisk:   compute.Disk{Name: pointer.String("disk-name")},
			expectedErr:   false,
		},
		{
			desc:           "an error shall be returned if response StatusNotFound",
			vmssVMList:     []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
			vmssName:       fakeStatusNotFoundVMSSName,
			vmssvmName:     "vmss00-vm-000000",
			isManagedDisk:  false,
			existedDisk:    compute.Disk{Name: pointer.String("disk-name")},
			expectedErr:    true,
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 404, RawError: %w", cloudprovider.InstanceNotFound),
		},
	}

	for i, test := range testCases {
		// Build a fresh fake cloud + VMSS topology for each case.
		scaleSetName := string(test.vmssName)
		ss, err := newTestScaleSet(ctrl)
		assert.NoError(t, err, test.desc)
		testCloud := ss.cloud
		testCloud.PrimaryScaleSetName = scaleSetName
		expectedVMSS := buildTestVMSSWithLB(scaleSetName, "vmss00-vm-", []string{testLBBackendpoolID0}, false)
		mockVMSSClient := testCloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
		mockVMSSClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup).Return([]compute.VirtualMachineScaleSet{expectedVMSS}, nil).AnyTimes()
		mockVMSSClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, scaleSetName).Return(expectedVMSS, nil).MaxTimes(1)
		mockVMSSClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any()).Return(nil).MaxTimes(1)

		// Give every fake VMSS VM an OS disk and an empty data-disk list so
		// AttachDisk has a StorageProfile to append to.
		expectedVMSSVMs, _, _ := buildTestVirtualMachineEnv(testCloud, scaleSetName, "", 0, test.vmssVMList, "succeeded", false)
		for _, vmssvm := range expectedVMSSVMs {
			vmssvm.StorageProfile = &compute.StorageProfile{
				OsDisk: &compute.OSDisk{
					Name: pointer.String("osdisk1"),
					ManagedDisk: &compute.ManagedDiskParameters{
						ID: pointer.String("ManagedID"),
						DiskEncryptionSet: &compute.DiskEncryptionSetParameters{
							ID: pointer.String("DiskEncryptionSetID"),
						},
					},
				},
				DataDisks: &[]compute.DataDisk{},
			}
		}
		mockVMSSVMClient := testCloud.VirtualMachineScaleSetVMsClient.(*mockvmssvmclient.MockInterface)
		mockVMSSVMClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
		if scaleSetName == string(fakeStatusNotFoundVMSSName) {
			// Simulate the instance disappearing between read and update.
			mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any(), gomock.Any()).Return(&retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
		} else {
			mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
		}

		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
			testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)

		err = ss.AttachDisk(test.isManagedDisk, "disk-name", diskURI, test.vmssvmName, 0, compute.CachingTypesReadWrite, "", true)
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected error: %v, return error: %v", i, test.desc, test.expectedErrMsg, err)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestDetachDiskWithVMSS verifies scaleSet.DetachDisk: the happy path, an
// invalid VMSS node name, a 404 from the VMSS VM Update call, and detaching a
// disk name that does not match any attached data disk (treated as success).
func TestDetachDiskWithVMSS(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Using this scale-set name makes the mocked Update call return a 404.
	fakeStatusNotFoundVMSSName := types.NodeName("FakeStatusNotFoundVMSSName")
	diskName := "disk-name"
	testCases := []struct {
		desc           string
		vmList         map[string]string
		vmssVMList     []string
		vmssName       types.NodeName
		vmssvmName     types.NodeName
		existedDisk    compute.Disk
		expectedErr    bool
		expectedErrMsg error
	}{
		{
			desc:           "an error shall be returned if it is invalid vmss name",
			vmssVMList:     []string{"vmss-vm-000001"},
			vmssName:       "vm1",
			vmssvmName:     "vm1",
			existedDisk:    compute.Disk{Name: pointer.String(diskName)},
			expectedErr:    true,
			expectedErrMsg: fmt.Errorf("not a vmss instance"),
		},
		{
			desc:        "no error shall be returned if everything is good",
			vmssVMList:  []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
			vmssName:    "vmss00",
			vmssvmName:  "vmss00-vm-000000",
			existedDisk: compute.Disk{Name: pointer.String(diskName)},
			expectedErr: false,
		},
		{
			desc:           "an error shall be returned if response StatusNotFound",
			vmssVMList:     []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
			vmssName:       fakeStatusNotFoundVMSSName,
			vmssvmName:     "vmss00-vm-000000",
			existedDisk:    compute.Disk{Name: pointer.String(diskName)},
			expectedErr:    true,
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 404, RawError: %w", cloudprovider.InstanceNotFound),
		},
		{
			desc:        "no error shall be returned if everything is good and the attaching disk does not match data disk",
			vmssVMList:  []string{"vmss00-vm-000000", "vmss00-vm-000001", "vmss00-vm-000002"},
			vmssName:    "vmss00",
			vmssvmName:  "vmss00-vm-000000",
			existedDisk: compute.Disk{Name: pointer.String("disk-name-err")},
			expectedErr: false,
		},
	}

	for i, test := range testCases {
		// Build a fresh fake cloud + VMSS topology for each case.
		scaleSetName := string(test.vmssName)
		ss, err := newTestScaleSet(ctrl)
		assert.NoError(t, err, test.desc)
		testCloud := ss.cloud
		testCloud.PrimaryScaleSetName = scaleSetName
		expectedVMSS := buildTestVMSSWithLB(scaleSetName, "vmss00-vm-", []string{testLBBackendpoolID0}, false)
		mockVMSSClient := testCloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
		mockVMSSClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup).Return([]compute.VirtualMachineScaleSet{expectedVMSS}, nil).AnyTimes()
		mockVMSSClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, scaleSetName).Return(expectedVMSS, nil).MaxTimes(1)
		mockVMSSClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any()).Return(nil).MaxTimes(1)

		// Each fake VMSS VM starts with exactly one attached data disk (lun 0)
		// so DetachDisk has something to remove.
		expectedVMSSVMs, _, _ := buildTestVirtualMachineEnv(testCloud, scaleSetName, "", 0, test.vmssVMList, "succeeded", false)
		for _, vmssvm := range expectedVMSSVMs {
			vmssvm.StorageProfile = &compute.StorageProfile{
				OsDisk: &compute.OSDisk{
					Name: pointer.String("osdisk1"),
					ManagedDisk: &compute.ManagedDiskParameters{
						ID: pointer.String("ManagedID"),
						DiskEncryptionSet: &compute.DiskEncryptionSetParameters{
							ID: pointer.String("DiskEncryptionSetID"),
						},
					},
				},
				DataDisks: &[]compute.DataDisk{{
					Lun:  pointer.Int32(0),
					Name: pointer.String(diskName),
				}},
			}
		}
		mockVMSSVMClient := testCloud.VirtualMachineScaleSetVMsClient.(*mockvmssvmclient.MockInterface)
		mockVMSSVMClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
		if scaleSetName == string(fakeStatusNotFoundVMSSName) {
			// Simulate the instance disappearing between read and update.
			mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any(), gomock.Any()).Return(&retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
		} else {
			mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
		}

		err = ss.DetachDisk(*test.existedDisk.Name, diskName, test.vmssvmName)
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, err: %v", i, test.desc, err)
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected error: %v, return error: %v", i, test.desc, test.expectedErrMsg, err)

		if !test.expectedErr {
			// The mocked Update never mutates the listed VMs, so the VM read
			// back through the cache still reports the single seeded data disk.
			dataDisks, err := ss.GetDataDisks(test.vmssvmName, azcache.CacheReadTypeDefault)
			assert.Equal(t, true, len(dataDisks) == 1, "TestCase[%d]: %s, actual data disk num: %d, err: %v", i, test.desc, len(dataDisks), err)
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// TestGetDataDisksWithVMSS verifies scaleSet.GetDataDisks: a missing VM yields
// an error, a populated StorageProfile is returned under both default and
// unsafe cache-read types, and a nil DataDisks list yields (nil, nil).
func TestGetDataDisksWithVMSS(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	var testCases = []struct {
		desc              string
		nodeName          types.NodeName
		isDataDiskNull    bool
		expectedDataDisks []compute.DataDisk
		expectedErr       bool
		expectedErrMsg    error
		crt               azcache.AzureCacheReadType
	}{
		{
			desc:              "an error shall be returned if there's no corresponding vm",
			nodeName:          "vmss00-vm-000001",
			expectedDataDisks: nil,
			expectedErr:       true,
			expectedErrMsg:    fmt.Errorf("instance not found"),
			crt:               azcache.CacheReadTypeDefault,
		},
		{
			desc:     "correct list of data disks shall be returned if everything is good",
			nodeName: "vmss00-vm-000000",
			expectedDataDisks: []compute.DataDisk{
				{
					Lun:  pointer.Int32(0),
					Name: pointer.String("disk1"),
				},
			},
			expectedErr: false,
			crt:         azcache.CacheReadTypeDefault,
		},
		{
			desc:     "correct list of data disks shall be returned if everything is good",
			nodeName: "vmss00-vm-000000",
			expectedDataDisks: []compute.DataDisk{
				{
					Lun:  pointer.Int32(0),
					Name: pointer.String("disk1"),
				},
			},
			expectedErr: false,
			crt:         azcache.CacheReadTypeUnsafe,
		},
		{
			desc:              "nil shall be returned if DataDisk is null",
			nodeName:          "vmss00-vm-000000",
			isDataDiskNull:    true,
			expectedDataDisks: nil,
			expectedErr:       false,
			crt:               azcache.CacheReadTypeDefault,
		},
	}
	for i, test := range testCases {
		// Build a fresh fake cloud + VMSS topology for each case; only
		// "vmss00-vm-000000" exists, so other node names hit the error path.
		scaleSetName := string(test.nodeName)
		ss, err := newTestScaleSet(ctrl)
		assert.NoError(t, err, test.desc)
		testCloud := ss.cloud
		testCloud.PrimaryScaleSetName = scaleSetName
		expectedVMSS := buildTestVMSSWithLB(scaleSetName, "vmss00-vm-", []string{testLBBackendpoolID0}, false)
		mockVMSSClient := testCloud.VirtualMachineScaleSetsClient.(*mockvmssclient.MockInterface)
		mockVMSSClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup).Return([]compute.VirtualMachineScaleSet{expectedVMSS}, nil).AnyTimes()
		mockVMSSClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, scaleSetName).Return(expectedVMSS, nil).MaxTimes(1)
		mockVMSSClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any()).Return(nil).MaxTimes(1)

		expectedVMSSVMs, _, _ := buildTestVirtualMachineEnv(testCloud, scaleSetName, "", 0, []string{"vmss00-vm-000000"}, "succeeded", false)
		if !test.isDataDiskNull {
			// Seed one data disk so the happy-path cases have a non-nil list.
			for _, vmssvm := range expectedVMSSVMs {
				vmssvm.StorageProfile = &compute.StorageProfile{
					DataDisks: &[]compute.DataDisk{{
						Lun:  pointer.Int32(0),
						Name: pointer.String("disk1"),
					}},
				}
			}
		}
		mockVMSSVMClient := testCloud.VirtualMachineScaleSetVMsClient.(*mockvmssvmclient.MockInterface)
		mockVMSSVMClient.EXPECT().List(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
		mockVMSSVMClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, scaleSetName, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
		dataDisks, err := ss.GetDataDisks(test.nodeName, test.crt)
		assert.Equal(t, test.expectedDataDisks, dataDisks, "TestCase[%d]: %s", i, test.desc)
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s", i, test.desc)
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected error: %v, return error: %v", i, test.desc, test.expectedErrMsg, err)
	}
}
 | 
			
		||||
@@ -1,99 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/sets"
 | 
			
		||||
	"k8s.io/client-go/tools/record"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/auth"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/diskclient/mockdiskclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/loadbalancerclient/mockloadbalancerclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/publicipclient/mockpublicipclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/routeclient/mockrouteclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/routetableclient/mockroutetableclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/securitygroupclient/mocksecuritygroupclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/subnetclient/mocksubnetclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// errPreconditionFailedEtagMismatch mimics an etag-mismatch failure
	// (presumably Azure's PreconditionFailed response) for use by the fakes
	// in this test helper file — TODO confirm against its call sites.
	errPreconditionFailedEtagMismatch = fmt.Errorf("PreconditionFailedEtagMismatch")
)
 | 
			
		||||
 | 
			
		||||
// GetTestCloud returns a fake azure cloud for unit tests in Azure related CSI drivers.
// Every network/compute/disk client is replaced with a gomock mock, the caches
// are real (backed by the mocked clients), and the VMSet is a standard
// availability-set implementation.
func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) {
	az = &Cloud{
		// Fixed, obviously-fake configuration values used across the tests.
		Config: Config{
			AzureAuthConfig: auth.AzureAuthConfig{
				TenantID:       "tenant",
				SubscriptionID: "subscription",
			},
			ResourceGroup:                "rg",
			VnetResourceGroup:            "rg",
			RouteTableResourceGroup:      "rg",
			SecurityGroupResourceGroup:   "rg",
			Location:                     "westus",
			VnetName:                     "vnet",
			SubnetName:                   "subnet",
			SecurityGroupName:            "nsg",
			RouteTableName:               "rt",
			PrimaryAvailabilitySetName:   "as",
			PrimaryScaleSetName:          "vmss",
			MaximumLoadBalancerRuleCount: 250,
			VMType:                       vmTypeStandard,
		},
		// Empty in-memory state; nodeInformerSynced always reports ready.
		nodeZones:                map[string]sets.String{},
		nodeInformerSynced:       func() bool { return true },
		nodeResourceGroups:       map[string]string{},
		unmanagedNodes:           sets.NewString(),
		excludeLoadBalancerNodes: sets.NewString(),
		routeCIDRs:               map[string]string{},
		eventRecorder:            &record.FakeRecorder{},
	}
	// Swap every Azure API client for a gomock mock so tests can set
	// expectations without touching the network.
	az.DisksClient = mockdiskclient.NewMockInterface(ctrl)
	az.InterfacesClient = mockinterfaceclient.NewMockInterface(ctrl)
	az.LoadBalancerClient = mockloadbalancerclient.NewMockInterface(ctrl)
	az.PublicIPAddressesClient = mockpublicipclient.NewMockInterface(ctrl)
	az.RoutesClient = mockrouteclient.NewMockInterface(ctrl)
	az.RouteTablesClient = mockroutetableclient.NewMockInterface(ctrl)
	az.SecurityGroupsClient = mocksecuritygroupclient.NewMockInterface(ctrl)
	az.SubnetsClient = mocksubnetclient.NewMockInterface(ctrl)
	az.VirtualMachineScaleSetsClient = mockvmssclient.NewMockInterface(ctrl)
	az.VirtualMachineScaleSetVMsClient = mockvmssvmclient.NewMockInterface(ctrl)
	az.VirtualMachinesClient = mockvmclient.NewMockInterface(ctrl)
	az.VMSet = newAvailabilitySet(az)
	// Real caches layered over the mocked clients; creation errors ignored
	// deliberately in this test-only helper.
	az.vmCache, _ = az.newVMCache()
	az.lbCache, _ = az.newLBCache()
	az.nsgCache, _ = az.newNSGCache()
	az.rtCache, _ = az.newRouteTableCache()

	common := &controllerCommon{cloud: az, resourceGroup: "rg", location: "westus"}
	az.controllerCommon = common
	az.ManagedDiskController = &ManagedDiskController{common: common}

	return az
}
 | 
			
		||||
@@ -1,42 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/fileclient"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// createFileShare creates an Azure file share described by shareOptions under
// the given storage account, delegating to the configured FileClient.
func (az *Cloud) createFileShare(resourceGroupName, accountName string, shareOptions *fileclient.ShareOptions) error {
	return az.FileClient.CreateFileShare(resourceGroupName, accountName, shareOptions)
}
 | 
			
		||||
 | 
			
		||||
// deleteFileShare deletes the named Azure file share from the given storage
// account, delegating to the configured FileClient.
func (az *Cloud) deleteFileShare(resourceGroupName, accountName, name string) error {
	return az.FileClient.DeleteFileShare(resourceGroupName, accountName, name)
}
 | 
			
		||||
 | 
			
		||||
// resizeFileShare resizes the named Azure file share to sizeGiB, delegating to
// the configured FileClient.
func (az *Cloud) resizeFileShare(resourceGroupName, accountName, name string, sizeGiB int) error {
	return az.FileClient.ResizeFileShare(resourceGroupName, accountName, name, sizeGiB)
}
 | 
			
		||||
 | 
			
		||||
// getFileShare fetches the named Azure file share from the given storage
// account, delegating to the configured FileClient.
func (az *Cloud) getFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
	return az.FileClient.GetFileShare(resourceGroupName, accountName, name)
}
 | 
			
		||||
@@ -1,270 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Endpoints, API versions, and cache settings for the Azure Instance Metadata
// Service (IMDS).
const (
	// metadataCacheTTL bounds how long a cached IMDS response is reused.
	metadataCacheTTL           = time.Minute
	metadataCacheKey           = "InstanceMetadata"
	imdsInstanceAPIVersion     = "2019-03-11"
	imdsLoadBalancerAPIVersion = "2020-10-01"
	// imdsServer is the well-known link-local IMDS address.
	imdsServer          = "http://169.254.169.254"
	imdsInstanceURI     = "/metadata/instance"
	imdsLoadBalancerURI = "/metadata/loadbalancer"
)
 | 
			
		||||
 | 
			
		||||
// NetworkMetadata contains metadata about an instance's network.
type NetworkMetadata struct {
	Interface []NetworkInterface `json:"interface"`
}

// NetworkInterface represents an instances network interface.
type NetworkInterface struct {
	IPV4 NetworkData `json:"ipv4"`
	IPV6 NetworkData `json:"ipv6"`
	MAC  string      `json:"macAddress"`
}

// NetworkData contains IP information for a network.
type NetworkData struct {
	IPAddress []IPAddress `json:"ipAddress"`
	Subnet    []Subnet    `json:"subnet"`
}

// IPAddress represents IP address information.
type IPAddress struct {
	PrivateIP string `json:"privateIpAddress"`
	PublicIP  string `json:"publicIpAddress"`
}

// Subnet represents subnet information.
type Subnet struct {
	Address string `json:"address"`
	Prefix  string `json:"prefix"`
}
 | 
			
		||||
 | 
			
		||||
// ComputeMetadata represents compute information returned by IMDS. All fields
// are optional and mirror the JSON keys of the instance metadata endpoint.
type ComputeMetadata struct {
	Environment    string `json:"azEnvironment,omitempty"`
	SKU            string `json:"sku,omitempty"`
	Name           string `json:"name,omitempty"`
	Zone           string `json:"zone,omitempty"`
	VMSize         string `json:"vmSize,omitempty"`
	OSType         string `json:"osType,omitempty"`
	Location       string `json:"location,omitempty"`
	FaultDomain    string `json:"platformFaultDomain,omitempty"`
	UpdateDomain   string `json:"platformUpdateDomain,omitempty"`
	ResourceGroup  string `json:"resourceGroupName,omitempty"`
	VMScaleSetName string `json:"vmScaleSetName,omitempty"`
	SubscriptionID string `json:"subscriptionId,omitempty"`
}

// InstanceMetadata represents instance information: the compute and network
// sections of an IMDS instance response.
type InstanceMetadata struct {
	Compute *ComputeMetadata `json:"compute,omitempty"`
	Network *NetworkMetadata `json:"network,omitempty"`
}
 | 
			
		||||
 | 
			
		||||
// PublicIPMetadata represents the public IP metadata.
 | 
			
		||||
type PublicIPMetadata struct {
 | 
			
		||||
	FrontendIPAddress string `json:"frontendIpAddress,omitempty"`
 | 
			
		||||
	PrivateIPAddress  string `json:"privateIpAddress,omitempty"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// LoadbalancerProfile represents load balancer profile in IMDS.
type LoadbalancerProfile struct {
	// PublicIPAddresses lists the frontend/private IP pairings for this instance.
	PublicIPAddresses []PublicIPMetadata `json:"publicIpAddresses,omitempty"`
}
 | 
			
		||||
 | 
			
		||||
// LoadBalancerMetadata represents load balancer metadata: the top-level
// document returned by the IMDS load balancer endpoint.
type LoadBalancerMetadata struct {
	// LoadBalancer may be nil when the VM is not behind a standard load balancer.
	LoadBalancer *LoadbalancerProfile `json:"loadbalancer,omitempty"`
}
 | 
			
		||||
 | 
			
		||||
// InstanceMetadataService knows how to query the Azure instance metadata server.
type InstanceMetadataService struct {
	// imdsServer is the base URL of the instance metadata server.
	imdsServer string
	// imsCache caches IMDS responses; entries are refreshed via getMetadata
	// once they are older than metadataCacheTTL.
	imsCache   *azcache.TimedCache
}
 | 
			
		||||
 | 
			
		||||
// NewInstanceMetadataService creates an instance of the InstanceMetadataService accessor object.
 | 
			
		||||
func NewInstanceMetadataService(imdsServer string) (*InstanceMetadataService, error) {
 | 
			
		||||
	ims := &InstanceMetadataService{
 | 
			
		||||
		imdsServer: imdsServer,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	imsCache, err := azcache.NewTimedcache(metadataCacheTTL, ims.getMetadata)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ims.imsCache = imsCache
 | 
			
		||||
	return ims, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getMetadata is the cache getter wired into imsCache: it fetches instance
// metadata from IMDS and, when no public IP is present in the instance
// metadata itself, attempts to fill one in from load balancer metadata.
// It returns the (possibly enriched) *InstanceMetadata as an interface{}.
func (ims *InstanceMetadataService) getMetadata(key string) (interface{}, error) {
	instanceMetadata, err := ims.getInstanceMetadata(key)
	if err != nil {
		return nil, err
	}

	if instanceMetadata.Network != nil && len(instanceMetadata.Network.Interface) > 0 {
		// netInterface is a copy of the slice element, but its IPAddress fields
		// are slices that share backing arrays with instanceMetadata, so the
		// PublicIP assignments below mutate instanceMetadata in place.
		netInterface := instanceMetadata.Network.Interface[0]
		if (len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PublicIP) > 0) ||
			(len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PublicIP) > 0) {
			// Return if public IP address has already part of instance metadata.
			return instanceMetadata, nil
		}

		loadBalancerMetadata, err := ims.getLoadBalancerMetadata()
		if err != nil || loadBalancerMetadata == nil || loadBalancerMetadata.LoadBalancer == nil {
			// Log a warning since loadbalancer metadata may not be available when the VM
			// is not in standard LoadBalancer backend address pool.
			klog.V(4).Infof("Warning: failed to get loadbalancer metadata: %v", err)
			return instanceMetadata, nil
		}

		// Match the interface's private IP against the load balancer frontends
		// to discover the corresponding public IP, per IP family.
		publicIPs := loadBalancerMetadata.LoadBalancer.PublicIPAddresses
		if len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PrivateIP) > 0 {
			for _, pip := range publicIPs {
				if pip.PrivateIPAddress == netInterface.IPV4.IPAddress[0].PrivateIP {
					netInterface.IPV4.IPAddress[0].PublicIP = pip.FrontendIPAddress
					break
				}
			}
		}
		if len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PrivateIP) > 0 {
			for _, pip := range publicIPs {
				if pip.PrivateIPAddress == netInterface.IPV6.IPAddress[0].PrivateIP {
					netInterface.IPV6.IPAddress[0].PublicIP = pip.FrontendIPAddress
					break
				}
			}
		}
	}

	return instanceMetadata, nil
}
 | 
			
		||||
 | 
			
		||||
func (ims *InstanceMetadataService) getInstanceMetadata(key string) (*InstanceMetadata, error) {
 | 
			
		||||
	req, err := http.NewRequest("GET", ims.imdsServer+imdsInstanceURI, nil)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	req.Header.Add("Metadata", "True")
 | 
			
		||||
	req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
 | 
			
		||||
 | 
			
		||||
	q := req.URL.Query()
 | 
			
		||||
	q.Add("format", "json")
 | 
			
		||||
	q.Add("api-version", imdsInstanceAPIVersion)
 | 
			
		||||
	req.URL.RawQuery = q.Encode()
 | 
			
		||||
 | 
			
		||||
	client := &http.Client{}
 | 
			
		||||
	resp, err := client.Do(req)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	defer resp.Body.Close()
 | 
			
		||||
 | 
			
		||||
	if resp.StatusCode != http.StatusOK {
 | 
			
		||||
		return nil, fmt.Errorf("failure of getting instance metadata with response %q", resp.Status)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	data, err := ioutil.ReadAll(resp.Body)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	obj := InstanceMetadata{}
 | 
			
		||||
	err = json.Unmarshal(data, &obj)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &obj, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (ims *InstanceMetadataService) getLoadBalancerMetadata() (*LoadBalancerMetadata, error) {
 | 
			
		||||
	req, err := http.NewRequest("GET", ims.imdsServer+imdsLoadBalancerURI, nil)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	req.Header.Add("Metadata", "True")
 | 
			
		||||
	req.Header.Add("User-Agent", "golang/kubernetes-cloud-provider")
 | 
			
		||||
 | 
			
		||||
	q := req.URL.Query()
 | 
			
		||||
	q.Add("format", "json")
 | 
			
		||||
	q.Add("api-version", imdsLoadBalancerAPIVersion)
 | 
			
		||||
	req.URL.RawQuery = q.Encode()
 | 
			
		||||
 | 
			
		||||
	client := &http.Client{}
 | 
			
		||||
	resp, err := client.Do(req)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	defer resp.Body.Close()
 | 
			
		||||
 | 
			
		||||
	if resp.StatusCode != http.StatusOK {
 | 
			
		||||
		return nil, fmt.Errorf("failure of getting loadbalancer metadata with response %q", resp.Status)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	data, err := ioutil.ReadAll(resp.Body)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	obj := LoadBalancerMetadata{}
 | 
			
		||||
	err = json.Unmarshal(data, &obj)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &obj, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetMetadata gets instance metadata from cache.
 | 
			
		||||
// crt determines if we can get data from stalled cache/need fresh if cache expired.
 | 
			
		||||
func (ims *InstanceMetadataService) GetMetadata(crt azcache.AzureCacheReadType) (*InstanceMetadata, error) {
 | 
			
		||||
	cache, err := ims.imsCache.Get(metadataCacheKey, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Cache shouldn't be nil, but added a check in case something is wrong.
 | 
			
		||||
	if cache == nil {
 | 
			
		||||
		return nil, fmt.Errorf("failure of getting instance metadata")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if metadata, ok := cache.(*InstanceMetadata); ok {
 | 
			
		||||
		return metadata, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil, fmt.Errorf("failure of getting instance metadata")
 | 
			
		||||
}
 | 
			
		||||
@@ -1,432 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"errors"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"os"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
	// Azure VM power-state codes ("PowerState/<state>"); the prefix is
	// stripped and the remainder compared in lower case (see
	// InstanceShutdownByProviderID).
	vmPowerStatePrefix       = "PowerState/"
	vmPowerStateStopped      = "stopped"
	vmPowerStateDeallocated  = "deallocated"
	vmPowerStateDeallocating = "deallocating"

	// nodeNameEnvironmentName is the environment variable name for getting node name.
	// It is only used for out-of-tree cloud provider.
	nodeNameEnvironmentName = "NODE_NAME"
)
 | 
			
		||||
 | 
			
		||||
var (
	// errNodeNotInitialized is returned when a node has an empty providerID,
	// i.e. the cloud provider has not initialized it yet.
	errNodeNotInitialized = fmt.Errorf("providerID is empty, the node is not initialized yet")
)
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) addressGetter(nodeName types.NodeName) ([]v1.NodeAddress, error) {
 | 
			
		||||
	ip, publicIP, err := az.getIPForMachine(nodeName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err)
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	addresses := []v1.NodeAddress{
 | 
			
		||||
		{Type: v1.NodeInternalIP, Address: ip},
 | 
			
		||||
		{Type: v1.NodeHostName, Address: string(nodeName)},
 | 
			
		||||
	}
 | 
			
		||||
	if len(publicIP) > 0 {
 | 
			
		||||
		addresses = append(addresses, v1.NodeAddress{
 | 
			
		||||
			Type:    v1.NodeExternalIP,
 | 
			
		||||
			Address: publicIP,
 | 
			
		||||
		})
 | 
			
		||||
	}
 | 
			
		||||
	return addresses, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NodeAddresses returns the addresses of the specified instance.
 | 
			
		||||
func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) {
 | 
			
		||||
	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
 | 
			
		||||
	unmanaged, err := az.IsNodeUnmanaged(string(name))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	if unmanaged {
 | 
			
		||||
		klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name)
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.UseInstanceMetadata {
 | 
			
		||||
		metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if metadata.Compute == nil || metadata.Network == nil {
 | 
			
		||||
			return nil, fmt.Errorf("failure of getting instance metadata")
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Not local instance, get addresses from Azure ARM API.
 | 
			
		||||
		if !isLocalInstance {
 | 
			
		||||
			if az.VMSet != nil {
 | 
			
		||||
				return az.addressGetter(name)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// vmSet == nil indicates credentials are not provided.
 | 
			
		||||
			return nil, fmt.Errorf("no credentials provided for Azure cloud provider")
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return az.getLocalInstanceNodeAddresses(metadata.Network.Interface, string(name))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az.addressGetter(name)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getLocalInstanceNodeAddresses(netInterfaces []NetworkInterface, nodeName string) ([]v1.NodeAddress, error) {
 | 
			
		||||
	if len(netInterfaces) == 0 {
 | 
			
		||||
		return nil, fmt.Errorf("no interface is found for the instance")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Use ip address got from instance metadata.
 | 
			
		||||
	netInterface := netInterfaces[0]
 | 
			
		||||
	addresses := []v1.NodeAddress{
 | 
			
		||||
		{Type: v1.NodeHostName, Address: nodeName},
 | 
			
		||||
	}
 | 
			
		||||
	if len(netInterface.IPV4.IPAddress) > 0 && len(netInterface.IPV4.IPAddress[0].PrivateIP) > 0 {
 | 
			
		||||
		address := netInterface.IPV4.IPAddress[0]
 | 
			
		||||
		addresses = append(addresses, v1.NodeAddress{
 | 
			
		||||
			Type:    v1.NodeInternalIP,
 | 
			
		||||
			Address: address.PrivateIP,
 | 
			
		||||
		})
 | 
			
		||||
		if len(address.PublicIP) > 0 {
 | 
			
		||||
			addresses = append(addresses, v1.NodeAddress{
 | 
			
		||||
				Type:    v1.NodeExternalIP,
 | 
			
		||||
				Address: address.PublicIP,
 | 
			
		||||
			})
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	if len(netInterface.IPV6.IPAddress) > 0 && len(netInterface.IPV6.IPAddress[0].PrivateIP) > 0 {
 | 
			
		||||
		address := netInterface.IPV6.IPAddress[0]
 | 
			
		||||
		addresses = append(addresses, v1.NodeAddress{
 | 
			
		||||
			Type:    v1.NodeInternalIP,
 | 
			
		||||
			Address: address.PrivateIP,
 | 
			
		||||
		})
 | 
			
		||||
		if len(address.PublicIP) > 0 {
 | 
			
		||||
			addresses = append(addresses, v1.NodeAddress{
 | 
			
		||||
				Type:    v1.NodeExternalIP,
 | 
			
		||||
				Address: address.PublicIP,
 | 
			
		||||
			})
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(addresses) == 1 {
 | 
			
		||||
		// No IP addresses is got from instance metadata service, clean up cache and report errors.
 | 
			
		||||
		az.metadata.imsCache.Delete(metadataCacheKey)
 | 
			
		||||
		return nil, fmt.Errorf("get empty IP addresses from instance metadata service")
 | 
			
		||||
	}
 | 
			
		||||
	return addresses, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NodeAddressesByProviderID returns the node addresses of an instances with the specified unique providerID
 | 
			
		||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
 | 
			
		||||
// and other local methods cannot be used here
 | 
			
		||||
func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) {
 | 
			
		||||
	if providerID == "" {
 | 
			
		||||
		return nil, errNodeNotInitialized
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
 | 
			
		||||
	if az.IsNodeUnmanagedByProviderID(providerID) {
 | 
			
		||||
		klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID)
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az.NodeAddresses(ctx, name)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running.
 | 
			
		||||
// If false is returned with no error, the instance will be immediately deleted by the cloud controller manager.
 | 
			
		||||
func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
 | 
			
		||||
	if providerID == "" {
 | 
			
		||||
		return false, errNodeNotInitialized
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Returns true for unmanaged nodes because azure cloud provider always assumes them exists.
 | 
			
		||||
	if az.IsNodeUnmanagedByProviderID(providerID) {
 | 
			
		||||
		klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID)
 | 
			
		||||
		return true, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		if err == cloudprovider.InstanceNotFound {
 | 
			
		||||
			return false, nil
 | 
			
		||||
		}
 | 
			
		||||
		return false, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	_, err = az.InstanceID(ctx, name)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		if err == cloudprovider.InstanceNotFound {
 | 
			
		||||
			return false, nil
 | 
			
		||||
		}
 | 
			
		||||
		return false, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return true, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes
 | 
			
		||||
func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
 | 
			
		||||
	if providerID == "" {
 | 
			
		||||
		return false, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
 | 
			
		||||
		if err == cloudprovider.InstanceNotFound {
 | 
			
		||||
			return false, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return false, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	powerStatus, err := az.VMSet.GetPowerStatusByNodeName(string(nodeName))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
 | 
			
		||||
		if err == cloudprovider.InstanceNotFound {
 | 
			
		||||
			return false, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return false, err
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(3).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName)
 | 
			
		||||
 | 
			
		||||
	provisioningState, err := az.VMSet.GetProvisioningStateByNodeName(string(nodeName))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		// Returns false, so the controller manager will continue to check InstanceExistsByProviderID().
 | 
			
		||||
		if errors.Is(err, cloudprovider.InstanceNotFound) {
 | 
			
		||||
			return false, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return false, err
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(3).Infof("InstanceShutdownByProviderID gets provisioning state %q for node %q", provisioningState, nodeName)
 | 
			
		||||
 | 
			
		||||
	status := strings.ToLower(powerStatus)
 | 
			
		||||
	provisioningSucceeded := strings.EqualFold(strings.ToLower(provisioningState), strings.ToLower(string(compute.ProvisioningStateSucceeded)))
 | 
			
		||||
	return provisioningSucceeded && (status == vmPowerStateStopped || status == vmPowerStateDeallocated || status == vmPowerStateDeallocating), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) (bool, error) {
 | 
			
		||||
	var err error
 | 
			
		||||
	nodeName := mapNodeNameToVMName(name)
 | 
			
		||||
 | 
			
		||||
	// VMSS vmName is not same with hostname, use hostname instead.
 | 
			
		||||
	if az.VMType == vmTypeVMSS {
 | 
			
		||||
		metadataVMName, err = os.Hostname()
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return false, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Use name from env variable "NODE_NAME" if it is set.
 | 
			
		||||
		nodeNameEnv := os.Getenv(nodeNameEnvironmentName)
 | 
			
		||||
		if nodeNameEnv != "" {
 | 
			
		||||
			metadataVMName = nodeNameEnv
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	metadataVMName = strings.ToLower(metadataVMName)
 | 
			
		||||
	return metadataVMName == nodeName, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InstanceID returns the cloud provider ID of the specified instance.
 | 
			
		||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
 | 
			
		||||
func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) {
 | 
			
		||||
	nodeName := mapNodeNameToVMName(name)
 | 
			
		||||
	unmanaged, err := az.IsNodeUnmanaged(nodeName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
	if unmanaged {
 | 
			
		||||
		// InstanceID is same with nodeName for unmanaged nodes.
 | 
			
		||||
		klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name)
 | 
			
		||||
		return nodeName, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.UseInstanceMetadata {
 | 
			
		||||
		metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return "", err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if metadata.Compute == nil {
 | 
			
		||||
			return "", fmt.Errorf("failure of getting instance metadata")
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return "", err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Not local instance, get instanceID from Azure ARM API.
 | 
			
		||||
		if !isLocalInstance {
 | 
			
		||||
			if az.VMSet != nil {
 | 
			
		||||
				return az.VMSet.GetInstanceIDByNodeName(nodeName)
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// vmSet == nil indicates credentials are not provided.
 | 
			
		||||
			return "", fmt.Errorf("no credentials provided for Azure cloud provider")
 | 
			
		||||
		}
 | 
			
		||||
		return az.getLocalInstanceProviderID(metadata, nodeName)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az.VMSet.GetInstanceIDByNodeName(nodeName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, nodeName string) (string, error) {
 | 
			
		||||
	// Get resource group name and subscription ID.
 | 
			
		||||
	resourceGroup := strings.ToLower(metadata.Compute.ResourceGroup)
 | 
			
		||||
	subscriptionID := strings.ToLower(metadata.Compute.SubscriptionID)
 | 
			
		||||
 | 
			
		||||
	// Compose instanceID based on nodeName for standard instance.
 | 
			
		||||
	if metadata.Compute.VMScaleSetName == "" {
 | 
			
		||||
		return az.getStandardMachineID(subscriptionID, resourceGroup, nodeName), nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Get scale set name and instanceID from vmName for vmss.
 | 
			
		||||
	ssName, instanceID, err := extractVmssVMName(metadata.Compute.Name)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		if err == ErrorNotVmssInstance {
 | 
			
		||||
			// Compose machineID for standard Node.
 | 
			
		||||
			return az.getStandardMachineID(subscriptionID, resourceGroup, nodeName), nil
 | 
			
		||||
		}
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
	// Compose instanceID based on ssName and instanceID for vmss instance.
 | 
			
		||||
	return az.getVmssMachineID(subscriptionID, resourceGroup, ssName, instanceID), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InstanceTypeByProviderID returns the cloudprovider instance type of the node with the specified unique providerID
 | 
			
		||||
// This method will not be called from the node that is requesting this ID. i.e. metadata service
 | 
			
		||||
// and other local methods cannot be used here
 | 
			
		||||
func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
 | 
			
		||||
	if providerID == "" {
 | 
			
		||||
		return "", errNodeNotInitialized
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
 | 
			
		||||
	if az.IsNodeUnmanagedByProviderID(providerID) {
 | 
			
		||||
		klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID)
 | 
			
		||||
		return "", nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	name, err := az.VMSet.GetNodeNameByProviderID(providerID)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az.InstanceType(ctx, name)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InstanceType returns the type of the specified instance.
 | 
			
		||||
// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
 | 
			
		||||
// (Implementer Note): This is used by kubelet. Kubelet will label the node. Real log from kubelet:
 | 
			
		||||
//
 | 
			
		||||
//	Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value]
 | 
			
		||||
func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
 | 
			
		||||
	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
 | 
			
		||||
	unmanaged, err := az.IsNodeUnmanaged(string(name))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
	if unmanaged {
 | 
			
		||||
		klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name)
 | 
			
		||||
		return "", nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.UseInstanceMetadata {
 | 
			
		||||
		metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeDefault)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return "", err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if metadata.Compute == nil {
 | 
			
		||||
			return "", fmt.Errorf("failure of getting instance metadata")
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		isLocalInstance, err := az.isCurrentInstance(name, metadata.Compute.Name)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return "", err
 | 
			
		||||
		}
 | 
			
		||||
		if !isLocalInstance {
 | 
			
		||||
			if az.VMSet != nil {
 | 
			
		||||
				return az.VMSet.GetInstanceTypeByNodeName(string(name))
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// vmSet == nil indicates credentials are not provided.
 | 
			
		||||
			return "", fmt.Errorf("no credentials provided for Azure cloud provider")
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if metadata.Compute.VMSize != "" {
 | 
			
		||||
			return metadata.Compute.VMSize, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az.VMSet.GetInstanceTypeByNodeName(string(name))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
func (az *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	// Not supported by the Azure provider; always reports NotImplemented.
	return cloudprovider.NotImplemented
}
 | 
			
		||||
 | 
			
		||||
// CurrentNodeName returns the name of the node we are currently running on.
// On Azure this is the hostname, so we just return the hostname unchanged,
// cast to the NodeName type.
func (az *Cloud) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}
 | 
			
		||||
 | 
			
		||||
// mapNodeNameToVMName maps a k8s NodeName to an Azure VM Name
// This is a simple string cast with no normalization applied.
func mapNodeNameToVMName(nodeName types.NodeName) string {
	return string(nodeName)
}
 | 
			
		||||
@@ -1,851 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/interfaceclient/mockinterfaceclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/publicipclient/mockpublicipclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// setTestVirtualMachines sets test virtual machine with powerstate.
 | 
			
		||||
func setTestVirtualMachines(c *Cloud, vmList map[string]string, isDataDisksFull bool) []compute.VirtualMachine {
 | 
			
		||||
	expectedVMs := make([]compute.VirtualMachine, 0)
 | 
			
		||||
 | 
			
		||||
	for nodeName, powerState := range vmList {
 | 
			
		||||
		instanceID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/%s", nodeName)
 | 
			
		||||
		vm := compute.VirtualMachine{
 | 
			
		||||
			Name:     &nodeName,
 | 
			
		||||
			ID:       &instanceID,
 | 
			
		||||
			Location: &c.Location,
 | 
			
		||||
		}
 | 
			
		||||
		status := []compute.InstanceViewStatus{
 | 
			
		||||
			{
 | 
			
		||||
				Code: pointer.String(powerState),
 | 
			
		||||
			},
 | 
			
		||||
			{
 | 
			
		||||
				Code: pointer.String("ProvisioningState/succeeded"),
 | 
			
		||||
			},
 | 
			
		||||
		}
 | 
			
		||||
		vm.VirtualMachineProperties = &compute.VirtualMachineProperties{
 | 
			
		||||
			ProvisioningState: pointer.String(string(compute.ProvisioningStateSucceeded)),
 | 
			
		||||
			HardwareProfile: &compute.HardwareProfile{
 | 
			
		||||
				VMSize: compute.VirtualMachineSizeTypesStandardA0,
 | 
			
		||||
			},
 | 
			
		||||
			InstanceView: &compute.VirtualMachineInstanceView{
 | 
			
		||||
				Statuses: &status,
 | 
			
		||||
			},
 | 
			
		||||
			StorageProfile: &compute.StorageProfile{
 | 
			
		||||
				DataDisks: &[]compute.DataDisk{},
 | 
			
		||||
			},
 | 
			
		||||
		}
 | 
			
		||||
		if !isDataDisksFull {
 | 
			
		||||
			vm.StorageProfile.DataDisks = &[]compute.DataDisk{{
 | 
			
		||||
				Lun:  pointer.Int32(0),
 | 
			
		||||
				Name: pointer.String("disk1"),
 | 
			
		||||
			}}
 | 
			
		||||
		} else {
 | 
			
		||||
			dataDisks := make([]compute.DataDisk, maxLUN)
 | 
			
		||||
			for i := 0; i < maxLUN; i++ {
 | 
			
		||||
				dataDisks[i] = compute.DataDisk{Lun: pointer.Int32(int32(i))}
 | 
			
		||||
			}
 | 
			
		||||
			vm.StorageProfile.DataDisks = &dataDisks
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		expectedVMs = append(expectedVMs, vm)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return expectedVMs
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestInstanceID(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	cloud := GetTestCloud(ctrl)
 | 
			
		||||
 | 
			
		||||
	testcases := []struct {
 | 
			
		||||
		name                string
 | 
			
		||||
		vmList              []string
 | 
			
		||||
		nodeName            string
 | 
			
		||||
		vmssName            string
 | 
			
		||||
		metadataName        string
 | 
			
		||||
		metadataTemplate    string
 | 
			
		||||
		vmType              string
 | 
			
		||||
		expectedID          string
 | 
			
		||||
		useInstanceMetadata bool
 | 
			
		||||
		useCustomImsCache   bool
 | 
			
		||||
		nilVMSet            bool
 | 
			
		||||
		expectedErrMsg      error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name:                "InstanceID should get instanceID if node's name are equal to metadataName",
 | 
			
		||||
			vmList:              []string{"vm1"},
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedID:          "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "InstanceID should get vmss instanceID from local if node's name are equal to metadataName and metadata.Compute.VMScaleSetName is not null",
 | 
			
		||||
			vmList:              []string{"vmss1_0"},
 | 
			
		||||
			vmssName:            "vmss1",
 | 
			
		||||
			nodeName:            "vmss1_0",
 | 
			
		||||
			metadataName:        "vmss1_0",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedID:          "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmss1/virtualMachines/0",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "InstanceID should get standard instanceID from local if node's name are equal to metadataName and format of nodeName is not compliance with vmss instance",
 | 
			
		||||
			vmList:              []string{"vmss1-0"},
 | 
			
		||||
			vmssName:            "vmss1",
 | 
			
		||||
			nodeName:            "vmss1-0",
 | 
			
		||||
			metadataName:        "vmss1-0",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedID:          "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vmss1-0",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "InstanceID should get instanceID from Azure API if node is not local instance",
 | 
			
		||||
			vmList:              []string{"vm2"},
 | 
			
		||||
			nodeName:            "vm2",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedID:          "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:         "InstanceID should get instanceID from Azure API if cloud.UseInstanceMetadata is false",
 | 
			
		||||
			vmList:       []string{"vm2"},
 | 
			
		||||
			nodeName:     "vm2",
 | 
			
		||||
			metadataName: "vm2",
 | 
			
		||||
			vmType:       vmTypeStandard,
 | 
			
		||||
			expectedID:   "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "InstanceID should report error if node doesn't exist",
 | 
			
		||||
			vmList:              []string{"vm1"},
 | 
			
		||||
			nodeName:            "vm3",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("instance not found"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "InstanceID should report error if metadata.Compute is nil",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			metadataTemplate:    `{"network":{"interface":[]}}`,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("failure of getting instance metadata"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error if cloud.VMSet is nil",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			nilVMSet:            true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("no credentials provided for Azure cloud provider"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error if invoking GetMetadata returns error",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useCustomImsCache:   true,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("getError"),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range testcases {
 | 
			
		||||
		if test.nilVMSet {
 | 
			
		||||
			cloud.VMSet = nil
 | 
			
		||||
		} else {
 | 
			
		||||
			cloud.VMSet = newAvailabilitySet(cloud)
 | 
			
		||||
		}
 | 
			
		||||
		cloud.Config.VMType = test.vmType
 | 
			
		||||
		cloud.Config.UseInstanceMetadata = test.useInstanceMetadata
 | 
			
		||||
		listener, err := net.Listen("tcp", "127.0.0.1:0")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		mux := http.NewServeMux()
 | 
			
		||||
		mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
			if test.metadataTemplate != "" {
 | 
			
		||||
				fmt.Fprintf(w, test.metadataTemplate)
 | 
			
		||||
			} else {
 | 
			
		||||
				fmt.Fprintf(w, "{\"compute\":{\"name\":\"%s\",\"VMScaleSetName\":\"%s\",\"subscriptionId\":\"subscription\",\"resourceGroupName\":\"rg\"}}", test.metadataName, test.vmssName)
 | 
			
		||||
			}
 | 
			
		||||
		}))
 | 
			
		||||
		go func() {
 | 
			
		||||
			http.Serve(listener, mux)
 | 
			
		||||
		}()
 | 
			
		||||
		defer listener.Close()
 | 
			
		||||
 | 
			
		||||
		cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
		if test.useCustomImsCache {
 | 
			
		||||
			cloud.metadata.imsCache, err = azcache.NewTimedcache(metadataCacheTTL, func(key string) (interface{}, error) {
 | 
			
		||||
				return nil, fmt.Errorf("getError")
 | 
			
		||||
			})
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		vmListWithPowerState := make(map[string]string)
 | 
			
		||||
		for _, vm := range test.vmList {
 | 
			
		||||
			vmListWithPowerState[vm] = ""
 | 
			
		||||
		}
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(cloud, vmListWithPowerState, false)
 | 
			
		||||
		mockVMsClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm3", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
		mockVMsClient.EXPECT().Update(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		instanceID, err := cloud.InstanceID(context.Background(), types.NodeName(test.nodeName))
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, test.name)
 | 
			
		||||
		assert.Equal(t, test.expectedID, instanceID, test.name)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestInstanceShutdownByProviderID(t *testing.T) {
 | 
			
		||||
	testcases := []struct {
 | 
			
		||||
		name              string
 | 
			
		||||
		vmList            map[string]string
 | 
			
		||||
		nodeName          string
 | 
			
		||||
		providerID        string
 | 
			
		||||
		provisioningState string
 | 
			
		||||
		expected          bool
 | 
			
		||||
		expectedErrMsg    error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return false if the vm is in PowerState/Running status",
 | 
			
		||||
			vmList:     map[string]string{"vm1": "PowerState/Running"},
 | 
			
		||||
			nodeName:   "vm1",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
 | 
			
		||||
			expected:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return true if the vm is in PowerState/Deallocated status",
 | 
			
		||||
			vmList:     map[string]string{"vm2": "PowerState/Deallocated"},
 | 
			
		||||
			nodeName:   "vm2",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
 | 
			
		||||
			expected:   true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return false if the vm is in PowerState/Deallocating status",
 | 
			
		||||
			vmList:     map[string]string{"vm3": "PowerState/Deallocating"},
 | 
			
		||||
			nodeName:   "vm3",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm3",
 | 
			
		||||
			expected:   true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return false if the vm is in PowerState/Starting status",
 | 
			
		||||
			vmList:     map[string]string{"vm4": "PowerState/Starting"},
 | 
			
		||||
			nodeName:   "vm4",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm4",
 | 
			
		||||
			expected:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return true if the vm is in PowerState/Stopped status",
 | 
			
		||||
			vmList:     map[string]string{"vm5": "PowerState/Stopped"},
 | 
			
		||||
			nodeName:   "vm5",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm5",
 | 
			
		||||
			expected:   true,
 | 
			
		||||
		},
 | 
			
		||||
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return false if the vm is in PowerState/Stopping status",
 | 
			
		||||
			vmList:     map[string]string{"vm6": "PowerState/Stopping"},
 | 
			
		||||
			nodeName:   "vm6",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm6",
 | 
			
		||||
			expected:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return false if the vm is in PowerState/Unknown status",
 | 
			
		||||
			vmList:     map[string]string{"vm7": "PowerState/Unknown"},
 | 
			
		||||
			nodeName:   "vm7",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm7",
 | 
			
		||||
			expected:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:       "InstanceShutdownByProviderID should return false if node doesn't exist",
 | 
			
		||||
			vmList:     map[string]string{"vm1": "PowerState/running"},
 | 
			
		||||
			nodeName:   "vm8",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm8",
 | 
			
		||||
			expected:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:              "InstanceShutdownByProviderID should return false if the vm is in PowerState/Stopped state with Creating provisioning state",
 | 
			
		||||
			vmList:            map[string]string{"vm9": "PowerState/Stopped"},
 | 
			
		||||
			nodeName:          "vm9",
 | 
			
		||||
			provisioningState: "Creating",
 | 
			
		||||
			providerID:        "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm9",
 | 
			
		||||
			expected:          false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:     "InstanceShutdownByProviderID should report error if providerID is null",
 | 
			
		||||
			nodeName: "vmm",
 | 
			
		||||
			expected: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "InstanceShutdownByProviderID should report error if providerID is invalid",
 | 
			
		||||
			providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/VM/vm10",
 | 
			
		||||
			nodeName:       "vm10",
 | 
			
		||||
			expected:       false,
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("error splitting providerID"),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	for _, test := range testcases {
 | 
			
		||||
		cloud := GetTestCloud(ctrl)
 | 
			
		||||
		expectedVMs := setTestVirtualMachines(cloud, test.vmList, false)
 | 
			
		||||
		if test.provisioningState != "" {
 | 
			
		||||
			expectedVMs[0].ProvisioningState = pointer.String(test.provisioningState)
 | 
			
		||||
		}
 | 
			
		||||
		mockVMsClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		for _, vm := range expectedVMs {
 | 
			
		||||
			mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		hasShutdown, err := cloud.InstanceShutdownByProviderID(context.Background(), test.providerID)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, test.name)
 | 
			
		||||
		assert.Equal(t, test.expected, hasShutdown, test.name)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestNodeAddresses(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	cloud := GetTestCloud(ctrl)
 | 
			
		||||
 | 
			
		||||
	expectedVM := compute.VirtualMachine{
 | 
			
		||||
		VirtualMachineProperties: &compute.VirtualMachineProperties{
 | 
			
		||||
			NetworkProfile: &compute.NetworkProfile{
 | 
			
		||||
				NetworkInterfaces: &[]compute.NetworkInterfaceReference{
 | 
			
		||||
					{
 | 
			
		||||
						NetworkInterfaceReferenceProperties: &compute.NetworkInterfaceReferenceProperties{
 | 
			
		||||
							Primary: pointer.Bool(true),
 | 
			
		||||
						},
 | 
			
		||||
						ID: pointer.String("/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/networkInterfaces/nic"),
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	expectedPIP := network.PublicIPAddress{
 | 
			
		||||
		ID: pointer.String("/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/pip1"),
 | 
			
		||||
		PublicIPAddressPropertiesFormat: &network.PublicIPAddressPropertiesFormat{
 | 
			
		||||
			IPAddress: pointer.String("192.168.1.12"),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	expectedInterface := network.Interface{
 | 
			
		||||
		InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
 | 
			
		||||
			IPConfigurations: &[]network.InterfaceIPConfiguration{
 | 
			
		||||
				{
 | 
			
		||||
					InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{
 | 
			
		||||
						PrivateIPAddress: pointer.String("172.1.0.3"),
 | 
			
		||||
						PublicIPAddress:  &expectedPIP,
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	expectedNodeAddress := []v1.NodeAddress{
 | 
			
		||||
		{
 | 
			
		||||
			Type:    v1.NodeInternalIP,
 | 
			
		||||
			Address: "172.1.0.3",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Type:    v1.NodeHostName,
 | 
			
		||||
			Address: "vm1",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			Type:    v1.NodeExternalIP,
 | 
			
		||||
			Address: "192.168.1.12",
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	metadataTemplate := `{"compute":{"name":"%s"},"network":{"interface":[{"ipv4":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]},"ipv6":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]}}]}}`
 | 
			
		||||
	loadbalancerTemplate := `{"loadbalancer": {"publicIpAddresses": [{"frontendIpAddress": "%s","privateIpAddress": "%s"},{"frontendIpAddress": "%s","privateIpAddress": "%s"}]}}`
 | 
			
		||||
	testcases := []struct {
 | 
			
		||||
		name                string
 | 
			
		||||
		nodeName            string
 | 
			
		||||
		metadataName        string
 | 
			
		||||
		metadataTemplate    string
 | 
			
		||||
		vmType              string
 | 
			
		||||
		ipV4                string
 | 
			
		||||
		ipV6                string
 | 
			
		||||
		ipV4Public          string
 | 
			
		||||
		ipV6Public          string
 | 
			
		||||
		loadBalancerSku     string
 | 
			
		||||
		expectedAddress     []v1.NodeAddress
 | 
			
		||||
		useInstanceMetadata bool
 | 
			
		||||
		useCustomImsCache   bool
 | 
			
		||||
		nilVMSet            bool
 | 
			
		||||
		expectedErrMsg      error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error if metadata.Network is nil",
 | 
			
		||||
			metadataTemplate:    `{"compute":{"name":"vm1"}}`,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("failure of getting instance metadata"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error if metadata.Compute is nil",
 | 
			
		||||
			metadataTemplate:    `{"network":{"interface":[]}}`,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("failure of getting instance metadata"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error if metadata.Network.Interface is nil",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			metadataTemplate:    `{"compute":{"name":"vm1"},"network":{}}`,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("no interface is found for the instance"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error when invoke GetMetadata",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useCustomImsCache:   true,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("getError"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error if cloud.VMSet is nil",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			nilVMSet:            true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("no credentials provided for Azure cloud provider"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error when IPs are empty",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("get empty IP addresses from instance metadata service"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should report error if node don't exist",
 | 
			
		||||
			nodeName:            "vm2",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedErrMsg:      wait.ErrWaitTimeout,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should get IP addresses from Azure API if node's name isn't equal to metadataName",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedAddress:     expectedNodeAddress,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:            "NodeAddresses should get IP addresses from Azure API if useInstanceMetadata is false",
 | 
			
		||||
			nodeName:        "vm1",
 | 
			
		||||
			vmType:          vmTypeStandard,
 | 
			
		||||
			expectedAddress: expectedNodeAddress,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should get IP addresses from local IMDS if node's name is equal to metadataName",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			ipV4:                "10.240.0.1",
 | 
			
		||||
			ipV4Public:          "192.168.1.12",
 | 
			
		||||
			ipV6:                "1111:11111:00:00:1111:1111:000:111",
 | 
			
		||||
			ipV6Public:          "2222:22221:00:00:2222:2222:000:111",
 | 
			
		||||
			loadBalancerSku:     "basic",
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedAddress: []v1.NodeAddress{
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeHostName,
 | 
			
		||||
					Address: "vm1",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeInternalIP,
 | 
			
		||||
					Address: "10.240.0.1",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeExternalIP,
 | 
			
		||||
					Address: "192.168.1.12",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeInternalIP,
 | 
			
		||||
					Address: "1111:11111:00:00:1111:1111:000:111",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeExternalIP,
 | 
			
		||||
					Address: "2222:22221:00:00:2222:2222:000:111",
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:                "NodeAddresses should get IP addresses from local IMDS for standard LoadBalancer if node's name is equal to metadataName",
 | 
			
		||||
			nodeName:            "vm1",
 | 
			
		||||
			metadataName:        "vm1",
 | 
			
		||||
			vmType:              vmTypeStandard,
 | 
			
		||||
			ipV4:                "10.240.0.1",
 | 
			
		||||
			ipV4Public:          "192.168.1.12",
 | 
			
		||||
			ipV6:                "1111:11111:00:00:1111:1111:000:111",
 | 
			
		||||
			ipV6Public:          "2222:22221:00:00:2222:2222:000:111",
 | 
			
		||||
			loadBalancerSku:     "standard",
 | 
			
		||||
			useInstanceMetadata: true,
 | 
			
		||||
			expectedAddress: []v1.NodeAddress{
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeHostName,
 | 
			
		||||
					Address: "vm1",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeInternalIP,
 | 
			
		||||
					Address: "10.240.0.1",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeExternalIP,
 | 
			
		||||
					Address: "192.168.1.12",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeInternalIP,
 | 
			
		||||
					Address: "1111:11111:00:00:1111:1111:000:111",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeExternalIP,
 | 
			
		||||
					Address: "2222:22221:00:00:2222:2222:000:111",
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range testcases {
 | 
			
		||||
		if test.nilVMSet {
 | 
			
		||||
			cloud.VMSet = nil
 | 
			
		||||
		} else {
 | 
			
		||||
			cloud.VMSet = newAvailabilitySet(cloud)
 | 
			
		||||
		}
 | 
			
		||||
		cloud.Config.VMType = test.vmType
 | 
			
		||||
		cloud.Config.UseInstanceMetadata = test.useInstanceMetadata
 | 
			
		||||
		listener, err := net.Listen("tcp", "127.0.0.1:0")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		mux := http.NewServeMux()
 | 
			
		||||
		mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
			if strings.Contains(r.RequestURI, imdsLoadBalancerURI) {
 | 
			
		||||
				fmt.Fprintf(w, loadbalancerTemplate, test.ipV4Public, test.ipV4, test.ipV6Public, test.ipV6)
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if test.metadataTemplate != "" {
 | 
			
		||||
				fmt.Fprintf(w, test.metadataTemplate)
 | 
			
		||||
			} else {
 | 
			
		||||
				if test.loadBalancerSku == "standard" {
 | 
			
		||||
					fmt.Fprintf(w, metadataTemplate, test.metadataName, test.ipV4, "", test.ipV6, "")
 | 
			
		||||
				} else {
 | 
			
		||||
					fmt.Fprintf(w, metadataTemplate, test.metadataName, test.ipV4, test.ipV4Public, test.ipV6, test.ipV6Public)
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}))
 | 
			
		||||
		go func() {
 | 
			
		||||
			http.Serve(listener, mux)
 | 
			
		||||
		}()
 | 
			
		||||
		defer listener.Close()
 | 
			
		||||
 | 
			
		||||
		cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if test.useCustomImsCache {
 | 
			
		||||
			cloud.metadata.imsCache, err = azcache.NewTimedcache(metadataCacheTTL, func(key string) (interface{}, error) {
 | 
			
		||||
				return nil, fmt.Errorf("getError")
 | 
			
		||||
			})
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
		mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm1", gomock.Any()).Return(expectedVM, nil).AnyTimes()
 | 
			
		||||
		mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm2", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		mockPublicIPAddressesClient := cloud.PublicIPAddressesClient.(*mockpublicipclient.MockInterface)
 | 
			
		||||
		mockPublicIPAddressesClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "pip1", gomock.Any()).Return(expectedPIP, nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		mockInterfaceClient := cloud.InterfacesClient.(*mockinterfaceclient.MockInterface)
 | 
			
		||||
		mockInterfaceClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "nic", gomock.Any()).Return(expectedInterface, nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		ipAddresses, err := cloud.NodeAddresses(context.Background(), types.NodeName(test.nodeName))
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, test.name)
 | 
			
		||||
		assert.Equal(t, test.expectedAddress, ipAddresses, test.name)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestInstanceExistsByProviderID exercises Cloud.InstanceExistsByProviderID
// for standalone VMs (first table) and VMSS-backed instances (second table),
// covering the existing, unmanaged, missing, malformed and empty providerID
// paths.
func TestInstanceExistsByProviderID(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	cloud := GetTestCloud(ctrl)

	testcases := []struct {
		name           string
		vmList         []string // VMs registered with the mock client
		nodeName       string
		providerID     string
		expected       bool
		expectedErrMsg error
	}{
		{
			name:       "InstanceExistsByProviderID should return true if node exists",
			vmList:     []string{"vm2"},
			nodeName:   "vm2",
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2",
			expected:   true,
		},
		{
			// providerID without the "azure://" scheme is treated as unmanaged
			// and reported as existing without an API lookup.
			name:       "InstanceExistsByProviderID should return true if node is unmanaged",
			providerID: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
			expected:   true,
		},
		{
			name:       "InstanceExistsByProviderID should return false if node doesn't exist",
			vmList:     []string{"vm1"},
			nodeName:   "vm3",
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm3",
			expected:   false,
		},
		{
			// "virtualMachine" (singular) makes the resource path unparsable.
			name:           "InstanceExistsByProviderID should report error if providerID is invalid",
			providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachine/vm3",
			expected:       false,
			expectedErrMsg: fmt.Errorf("error splitting providerID"),
		},
		{
			name:           "InstanceExistsByProviderID should report error if providerID is null",
			expected:       false,
			expectedErrMsg: fmt.Errorf("providerID is empty, the node is not initialized yet"),
		},
	}

	for _, test := range testcases {
		vmListWithPowerState := make(map[string]string)
		for _, vm := range test.vmList {
			vmListWithPowerState[vm] = ""
		}
		// Register the listed VMs with the mock VM client; vm3 always 404s so
		// the "doesn't exist" case is deterministic.
		expectedVMs := setTestVirtualMachines(cloud, vmListWithPowerState, false)
		mockVMsClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
		for _, vm := range expectedVMs {
			mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes()
		}
		mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm3", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes()
		mockVMsClient.EXPECT().Update(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

		exist, err := cloud.InstanceExistsByProviderID(context.Background(), test.providerID)
		assert.Equal(t, test.expectedErrMsg, err, test.name)
		assert.Equal(t, test.expected, exist, test.name)
	}

	// Second table: the same entry point, but with a VMSS-backed VMSet.
	vmssTestCases := []struct {
		name       string
		providerID string
		scaleSet   string
		vmList     []string
		expected   bool
		rerr       *retry.Error // error returned by the mock List calls
	}{
		{
			name:       "InstanceExistsByProviderID should return true if VMSS and VM exist",
			providerID: "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmssee6c2/virtualMachines/0",
			scaleSet:   "vmssee6c2",
			vmList:     []string{"vmssee6c2000000"},
			expected:   true,
		},
		{
			name:       "InstanceExistsByProviderID should return false if VMSS exist but VM doesn't",
			providerID: "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/vmssee6c2/virtualMachines/0",
			scaleSet:   "vmssee6c2",
			expected:   false,
		},
		{
			name:       "InstanceExistsByProviderID should return false if VMSS doesn't exist",
			providerID: "azure:///subscriptions/script/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/missing-vmss/virtualMachines/0",
			rerr:       &retry.Error{HTTPStatusCode: 404},
			expected:   false,
		},
	}

	for _, test := range vmssTestCases {
		// A fresh scale set per case so cached state doesn't leak between cases.
		ss, err := newTestScaleSet(ctrl)
		assert.NoError(t, err, test.name)
		cloud.VMSet = ss

		mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
		mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
		ss.cloud.VirtualMachineScaleSetsClient = mockVMSSClient
		ss.cloud.VirtualMachineScaleSetVMsClient = mockVMSSVMClient

		expectedScaleSet := buildTestVMSS(test.scaleSet, test.scaleSet)
		mockVMSSClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]compute.VirtualMachineScaleSet{expectedScaleSet}, test.rerr).AnyTimes()

		expectedVMs, _, _ := buildTestVirtualMachineEnv(ss.cloud, test.scaleSet, "", 0, test.vmList, "succeeded", false)
		mockVMSSVMClient.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedVMs, test.rerr).AnyTimes()

		// Standalone VM lookup must come back empty so resolution falls
		// through to the scale-set path.
		mockVMsClient := ss.cloud.VirtualMachinesClient.(*mockvmclient.MockInterface)
		mockVMsClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]compute.VirtualMachine{}, nil).AnyTimes()

		exist, _ := cloud.InstanceExistsByProviderID(context.Background(), test.providerID)
		assert.Equal(t, test.expected, exist, test.name)
	}
}
 | 
			
		||||
 | 
			
		||||
func TestNodeAddressesByProviderID(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	cloud := GetTestCloud(ctrl)
 | 
			
		||||
	cloud.Config.UseInstanceMetadata = true
 | 
			
		||||
	metadataTemplate := `{"compute":{"name":"%s"},"network":{"interface":[{"ipv4":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]},"ipv6":{"ipAddress":[{"privateIpAddress":"%s","publicIpAddress":"%s"}]}}]}}`
 | 
			
		||||
 | 
			
		||||
	testcases := []struct {
 | 
			
		||||
		name            string
 | 
			
		||||
		nodeName        string
 | 
			
		||||
		ipV4            string
 | 
			
		||||
		ipV6            string
 | 
			
		||||
		ipV4Public      string
 | 
			
		||||
		ipV6Public      string
 | 
			
		||||
		providerID      string
 | 
			
		||||
		expectedAddress []v1.NodeAddress
 | 
			
		||||
		expectedErrMsg  error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name:       "NodeAddressesByProviderID should get both ipV4 and ipV6 private addresses",
 | 
			
		||||
			nodeName:   "vm1",
 | 
			
		||||
			providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
 | 
			
		||||
			ipV4:       "10.240.0.1",
 | 
			
		||||
			ipV6:       "1111:11111:00:00:1111:1111:000:111",
 | 
			
		||||
			expectedAddress: []v1.NodeAddress{
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeHostName,
 | 
			
		||||
					Address: "vm1",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeInternalIP,
 | 
			
		||||
					Address: "10.240.0.1",
 | 
			
		||||
				},
 | 
			
		||||
				{
 | 
			
		||||
					Type:    v1.NodeInternalIP,
 | 
			
		||||
					Address: "1111:11111:00:00:1111:1111:000:111",
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "NodeAddressesByProviderID should report error when IPs are empty",
 | 
			
		||||
			nodeName:       "vm1",
 | 
			
		||||
			providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("get empty IP addresses from instance metadata service"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:       "NodeAddressesByProviderID should return nil if node is unmanaged",
 | 
			
		||||
			providerID: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "NodeAddressesByProviderID should report error if providerID is invalid",
 | 
			
		||||
			providerID:     "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachine/vm3",
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("error splitting providerID"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "NodeAddressesByProviderID should report error if providerID is null",
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("providerID is empty, the node is not initialized yet"),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range testcases {
 | 
			
		||||
		listener, err := net.Listen("tcp", "127.0.0.1:0")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		mux := http.NewServeMux()
 | 
			
		||||
		mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
			fmt.Fprintf(w, metadataTemplate, test.nodeName, test.ipV4, test.ipV4Public, test.ipV6, test.ipV6Public)
 | 
			
		||||
		}))
 | 
			
		||||
		go func() {
 | 
			
		||||
			http.Serve(listener, mux)
 | 
			
		||||
		}()
 | 
			
		||||
		defer listener.Close()
 | 
			
		||||
 | 
			
		||||
		cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		ipAddresses, err := cloud.NodeAddressesByProviderID(context.Background(), test.providerID)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, test.name)
 | 
			
		||||
		assert.Equal(t, test.expectedAddress, ipAddresses, test.name)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCurrentNodeName(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	cloud := GetTestCloud(ctrl)
 | 
			
		||||
 | 
			
		||||
	hostname := "testvm"
 | 
			
		||||
	nodeName, err := cloud.CurrentNodeName(context.Background(), hostname)
 | 
			
		||||
	assert.Equal(t, types.NodeName(hostname), nodeName)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
}
 | 
			
		||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							@@ -1,399 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"path"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/api/resource"
 | 
			
		||||
	kwait "k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudvolume "k8s.io/cloud-provider/volume"
 | 
			
		||||
	volumehelpers "k8s.io/cloud-provider/volume/helpers"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
	// default IOPS Caps & Throughput Cap (MBps) per https://docs.microsoft.com/en-us/azure/virtual-machines/linux/disks-ultra-ssd
	defaultDiskIOPSReadWrite = 500
	defaultDiskMBpsReadWrite = 100

	// diskEncryptionSetIDFormat documents the expected ARM resource-ID shape
	// of a disk encryption set; it is quoted in error messages when a
	// caller-supplied DiskEncryptionSetID does not start with "/subscriptions/".
	diskEncryptionSetIDFormat = "/subscriptions/{subs-id}/resourceGroups/{rg-name}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSet-name}"
)
 | 
			
		||||
 | 
			
		||||
// ManagedDiskController handles create, delete, get and resize operations for
// Azure managed disks on behalf of the cloud provider.
type ManagedDiskController struct {
	// common carries the shared cloud client, subscription, resource group
	// and location used by all disk operations.
	common *controllerCommon
}
 | 
			
		||||
 | 
			
		||||
// ManagedDiskOptions specifies the options of managed disks.
type ManagedDiskOptions struct {
	// The name of the disk.
	DiskName string
	// The size in GB.
	SizeGB int
	// The name of PVC.
	PVCName string
	// The name of resource group. When empty, the controller's default
	// resource group is used.
	ResourceGroup string
	// The AvailabilityZone to create the disk.
	AvailabilityZone string
	// The tags of the disk. Forward slashes in keys and values are replaced
	// with dashes, since Azure rejects "/" in tags.
	Tags map[string]string
	// The SKU of storage account.
	StorageAccountType compute.DiskStorageAccountTypes
	// IOPS cap for an UltraSSD disk, as a decimal-integer string; only valid
	// with the UltraSSD_LRS storage account type.
	DiskIOPSReadWrite string
	// Throughput cap (MBps) for an UltraSSD disk, as a decimal-integer
	// string; only valid with the UltraSSD_LRS storage account type.
	DiskMBpsReadWrite string
	// if SourceResourceID is not empty, then it's a disk copy operation(for snapshot)
	SourceResourceID string
	// The type of source
	SourceType string
	// ResourceId of the disk encryption set to use for enabling encryption at rest.
	DiskEncryptionSetID string
	// The maximum number of VMs that can attach to the disk at the same time. Value greater than one indicates a disk that can be mounted on multiple VMs at the same time.
	MaxShares int32
}
 | 
			
		||||
 | 
			
		||||
// CreateManagedDisk : create managed disk
 | 
			
		||||
func (c *ManagedDiskController) CreateManagedDisk(options *ManagedDiskOptions) (string, error) {
 | 
			
		||||
	var err error
 | 
			
		||||
	klog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
 | 
			
		||||
 | 
			
		||||
	var createZones []string
 | 
			
		||||
	if len(options.AvailabilityZone) > 0 {
 | 
			
		||||
		requestedZone := c.common.cloud.GetZoneID(options.AvailabilityZone)
 | 
			
		||||
		if requestedZone != "" {
 | 
			
		||||
			createZones = append(createZones, requestedZone)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// insert original tags to newTags
 | 
			
		||||
	newTags := make(map[string]*string)
 | 
			
		||||
	azureDDTag := "kubernetes-azure-dd"
 | 
			
		||||
	newTags["created-by"] = &azureDDTag
 | 
			
		||||
	if options.Tags != nil {
 | 
			
		||||
		for k, v := range options.Tags {
 | 
			
		||||
			// Azure won't allow / (forward slash) in tags
 | 
			
		||||
			newKey := strings.Replace(k, "/", "-", -1)
 | 
			
		||||
			newValue := strings.Replace(v, "/", "-", -1)
 | 
			
		||||
			newTags[newKey] = &newValue
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	diskSizeGB := int32(options.SizeGB)
 | 
			
		||||
	diskSku := compute.DiskStorageAccountTypes(options.StorageAccountType)
 | 
			
		||||
 | 
			
		||||
	creationData, err := getValidCreationData(c.common.subscriptionID, options.ResourceGroup, options.SourceResourceID, options.SourceType)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", err
 | 
			
		||||
	}
 | 
			
		||||
	diskProperties := compute.DiskProperties{
 | 
			
		||||
		DiskSizeGB:   &diskSizeGB,
 | 
			
		||||
		CreationData: &creationData,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if diskSku == compute.UltraSSDLRS {
 | 
			
		||||
		diskIOPSReadWrite := int64(defaultDiskIOPSReadWrite)
 | 
			
		||||
		if options.DiskIOPSReadWrite != "" {
 | 
			
		||||
			v, err := strconv.Atoi(options.DiskIOPSReadWrite)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return "", fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: %v", err)
 | 
			
		||||
			}
 | 
			
		||||
			diskIOPSReadWrite = int64(v)
 | 
			
		||||
		}
 | 
			
		||||
		diskProperties.DiskIOPSReadWrite = pointer.Int64(diskIOPSReadWrite)
 | 
			
		||||
 | 
			
		||||
		diskMBpsReadWrite := int64(defaultDiskMBpsReadWrite)
 | 
			
		||||
		if options.DiskMBpsReadWrite != "" {
 | 
			
		||||
			v, err := strconv.Atoi(options.DiskMBpsReadWrite)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return "", fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: %v", err)
 | 
			
		||||
			}
 | 
			
		||||
			diskMBpsReadWrite = int64(v)
 | 
			
		||||
		}
 | 
			
		||||
		diskProperties.DiskMBpsReadWrite = pointer.Int64(diskMBpsReadWrite)
 | 
			
		||||
	} else {
 | 
			
		||||
		if options.DiskIOPSReadWrite != "" {
 | 
			
		||||
			return "", fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type")
 | 
			
		||||
		}
 | 
			
		||||
		if options.DiskMBpsReadWrite != "" {
 | 
			
		||||
			return "", fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type")
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if options.DiskEncryptionSetID != "" {
 | 
			
		||||
		if strings.Index(strings.ToLower(options.DiskEncryptionSetID), "/subscriptions/") != 0 {
 | 
			
		||||
			return "", fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", options.DiskEncryptionSetID, diskEncryptionSetIDFormat)
 | 
			
		||||
		}
 | 
			
		||||
		diskProperties.Encryption = &compute.Encryption{
 | 
			
		||||
			DiskEncryptionSetID: &options.DiskEncryptionSetID,
 | 
			
		||||
			Type:                compute.EncryptionAtRestWithCustomerKey,
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if options.MaxShares > 1 {
 | 
			
		||||
		diskProperties.MaxShares = &options.MaxShares
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	model := compute.Disk{
 | 
			
		||||
		Location: &c.common.location,
 | 
			
		||||
		Tags:     newTags,
 | 
			
		||||
		Sku: &compute.DiskSku{
 | 
			
		||||
			Name: diskSku,
 | 
			
		||||
		},
 | 
			
		||||
		DiskProperties: &diskProperties,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(createZones) > 0 {
 | 
			
		||||
		model.Zones = &createZones
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if options.ResourceGroup == "" {
 | 
			
		||||
		options.ResourceGroup = c.common.resourceGroup
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	rerr := c.common.cloud.DisksClient.CreateOrUpdate(ctx, options.ResourceGroup, options.DiskName, model)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return "", rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	diskID := ""
 | 
			
		||||
 | 
			
		||||
	err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
 | 
			
		||||
		provisionState, id, err := c.GetDisk(options.ResourceGroup, options.DiskName)
 | 
			
		||||
		diskID = id
 | 
			
		||||
		// We are waiting for provisioningState==Succeeded
 | 
			
		||||
		// We don't want to hand-off managed disks to k8s while they are
 | 
			
		||||
		//still being provisioned, this is to avoid some race conditions
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return false, err
 | 
			
		||||
		}
 | 
			
		||||
		if strings.ToLower(provisionState) == "succeeded" {
 | 
			
		||||
			return true, nil
 | 
			
		||||
		}
 | 
			
		||||
		return false, nil
 | 
			
		||||
	})
 | 
			
		||||
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", options.DiskName, options.StorageAccountType, options.SizeGB)
 | 
			
		||||
	} else {
 | 
			
		||||
		klog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", options.DiskName, options.StorageAccountType, options.SizeGB)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return diskID, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeleteManagedDisk : delete managed disk
 | 
			
		||||
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
 | 
			
		||||
	diskName := path.Base(diskURI)
 | 
			
		||||
	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	if _, ok := c.common.diskAttachDetachMap.Load(strings.ToLower(diskURI)); ok {
 | 
			
		||||
		return fmt.Errorf("failed to delete disk(%s) since it's in attaching or detaching state", diskURI)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	disk, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.HTTPStatusCode == http.StatusNotFound {
 | 
			
		||||
			klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI)
 | 
			
		||||
			return nil
 | 
			
		||||
		}
 | 
			
		||||
		return rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if disk.ManagedBy != nil {
 | 
			
		||||
		return fmt.Errorf("disk(%s) already attached to node(%s), could not be deleted", diskURI, *disk.ManagedBy)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rerr = c.common.cloud.DisksClient.Delete(ctx, resourceGroup, diskName)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.HTTPStatusCode == http.StatusNotFound {
 | 
			
		||||
			klog.V(2).Infof("azureDisk - disk(%s) is already deleted", diskURI)
 | 
			
		||||
			return nil
 | 
			
		||||
		}
 | 
			
		||||
		return rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	// We don't need poll here, k8s will immediately stop referencing the disk
 | 
			
		||||
	// the disk will be eventually deleted - cleanly - by ARM
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetDisk return: disk provisionState, diskID, error
 | 
			
		||||
func (c *ManagedDiskController) GetDisk(resourceGroup, diskName string) (string, string, error) {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	result, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return "", "", rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if result.DiskProperties != nil && (*result.DiskProperties).ProvisioningState != nil {
 | 
			
		||||
		return *(*result.DiskProperties).ProvisioningState, *result.ID, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return "", "", nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ResizeDisk expands the disk identified by diskURI to newSize (rounded up
// to whole GiB) and returns the size actually in effect. If the disk is
// already at least the requested size, the rounded new size is returned
// without calling Azure. Resizing is only supported while the disk is
// unattached; on any failure, oldSize is returned together with the error.
func (c *ManagedDiskController) ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error) {
	ctx, cancel := getContextWithCancel()
	defer cancel()

	diskName := path.Base(diskURI)
	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
	if err != nil {
		return oldSize, err
	}

	result, rerr := c.common.cloud.DisksClient.Get(ctx, resourceGroup, diskName)
	if rerr != nil {
		return oldSize, rerr.Error()
	}

	if result.DiskProperties == nil || result.DiskProperties.DiskSizeGB == nil {
		return oldSize, fmt.Errorf("DiskProperties of disk(%s) is nil", diskName)
	}

	// Azure resizes in chunks of GiB (not GB).
	requestGiB, err := volumehelpers.RoundUpToGiBInt32(newSize)
	if err != nil {
		return oldSize, err
	}

	newSizeQuant := resource.MustParse(fmt.Sprintf("%dGi", requestGiB))

	klog.V(2).Infof("azureDisk - begin to resize disk(%s) with new size(%d), old size(%v)", diskName, requestGiB, oldSize)
	// If the disk is already of greater or equal size than requested, report
	// success without issuing an update.
	if *result.DiskProperties.DiskSizeGB >= requestGiB {
		return newSizeQuant, nil
	}

	// Azure only allows resizing detached disks.
	if result.DiskProperties.DiskState != compute.Unattached {
		return oldSize, fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: %s, already attached to %s", result.DiskProperties.DiskState, pointer.StringDeref(result.ManagedBy, ""))
	}

	diskParameter := compute.DiskUpdate{
		DiskUpdateProperties: &compute.DiskUpdateProperties{
			DiskSizeGB: &requestGiB,
		},
	}

	// Fresh context for the update call; the first one may have been open
	// for a while during the Get above.
	ctx, cancel = getContextWithCancel()
	defer cancel()
	if rerr := c.common.cloud.DisksClient.Update(ctx, resourceGroup, diskName, diskParameter); rerr != nil {
		return oldSize, rerr.Error()
	}

	klog.V(2).Infof("azureDisk - resize disk(%s) with new size(%d) completed", diskName, requestGiB)

	return newSizeQuant, nil
}
 | 
			
		||||
 | 
			
		||||
// getResourceGroupFromDiskURI extracts the resource-group name from a managed
// disk URI, e.g. it returns {group-name} for
// /subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}
// according to https://docs.microsoft.com/en-us/rest/api/compute/disks/get
func getResourceGroupFromDiskURI(diskURI string) (string, error) {
	segments := strings.Split(diskURI, "/")
	// A well-formed disk URI splits into exactly 9 segments, with the
	// literal "resourcegroups" (case-insensitive) in position 3.
	if len(segments) != 9 || strings.ToLower(segments[3]) != "resourcegroups" {
		return "", fmt.Errorf("invalid disk URI: %s", diskURI)
	}
	return segments[4], nil
}
 | 
			
		||||
 | 
			
		||||
// GetLabelsForVolume implements PVLabeler.GetLabelsForVolume
 | 
			
		||||
func (c *Cloud) GetLabelsForVolume(ctx context.Context, pv *v1.PersistentVolume) (map[string]string, error) {
 | 
			
		||||
	// Ignore if not AzureDisk.
 | 
			
		||||
	if pv.Spec.AzureDisk == nil {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Ignore any volumes that are being provisioned
 | 
			
		||||
	if pv.Spec.AzureDisk.DiskName == cloudvolume.ProvisionedVolumeName {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return c.GetAzureDiskLabels(pv.Spec.AzureDisk.DataDiskURI)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetAzureDiskLabels gets availability zone labels for Azuredisk. The region
// label is always set from the cloud's configured location; the zone label
// is added only when the disk reports an availability zone. When no
// DisksClient is configured (no Azure credentials), the region-only label
// map is returned without error.
func (c *Cloud) GetAzureDiskLabels(diskURI string) (map[string]string, error) {
	// Get disk's resource group.
	diskName := path.Base(diskURI)
	resourceGroup, err := getResourceGroupFromDiskURI(diskURI)
	if err != nil {
		klog.Errorf("Failed to get resource group for AzureDisk %q: %v", diskName, err)
		return nil, err
	}

	labels := map[string]string{
		v1.LabelTopologyRegion: c.Location,
	}
	// No azure credential is set; return the region label only.
	if c.DisksClient == nil {
		return labels, nil
	}
	// Get information of the disk.
	ctx, cancel := getContextWithCancel()
	defer cancel()
	disk, rerr := c.DisksClient.Get(ctx, resourceGroup, diskName)
	if rerr != nil {
		klog.Errorf("Failed to get information for AzureDisk %q: %v", diskName, rerr)
		return nil, rerr.Error()
	}

	// Check whether availability zone is specified; unzoned disks get no
	// zone label.
	if disk.Zones == nil || len(*disk.Zones) == 0 {
		klog.V(4).Infof("Azure disk %q is not zoned", diskName)
		return labels, nil
	}

	// Azure reports zones as numeric strings; only the first is used.
	zones := *disk.Zones
	zoneID, err := strconv.Atoi(zones[0])
	if err != nil {
		return nil, fmt.Errorf("failed to parse zone %v for AzureDisk %v: %v", zones, diskName, err)
	}

	zone := c.makeZone(c.Location, zoneID)
	klog.V(4).Infof("Got zone %q for Azure disk %q", zone, diskName)
	labels[v1.LabelTopologyZone] = zone
	return labels, nil
}
 | 
			
		||||
@@ -1,553 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/api/resource"
 | 
			
		||||
	cloudvolume "k8s.io/cloud-provider/volume"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/diskclient/mockdiskclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestCreateManagedDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	maxShare := int32(2)
 | 
			
		||||
	goodDiskEncryptionSetID := fmt.Sprintf("/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/diskEncryptionSets/%s", "diskEncryptionSet-name")
 | 
			
		||||
	badDiskEncryptionSetID := fmt.Sprintf("badDiskEncryptionSetID")
 | 
			
		||||
	testTags := make(map[string]*string)
 | 
			
		||||
	testTags[WriteAcceleratorEnabled] = pointer.String("true")
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc                string
 | 
			
		||||
		diskID              string
 | 
			
		||||
		diskName            string
 | 
			
		||||
		storageAccountType  compute.DiskStorageAccountTypes
 | 
			
		||||
		diskIOPSReadWrite   string
 | 
			
		||||
		diskMBPSReadWrite   string
 | 
			
		||||
		diskEncryptionSetID string
 | 
			
		||||
		expectedDiskID      string
 | 
			
		||||
		existedDisk         compute.Disk
 | 
			
		||||
		expectedErr         bool
 | 
			
		||||
		expectedErrMsg      error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:                "disk Id and no error shall be returned if everything is good with UltraSSDLRS storage account",
 | 
			
		||||
			diskID:              "diskid1",
 | 
			
		||||
			diskName:            "disk1",
 | 
			
		||||
			storageAccountType:  compute.UltraSSDLRS,
 | 
			
		||||
			diskIOPSReadWrite:   "100",
 | 
			
		||||
			diskMBPSReadWrite:   "100",
 | 
			
		||||
			diskEncryptionSetID: goodDiskEncryptionSetID,
 | 
			
		||||
			expectedDiskID:      "diskid1",
 | 
			
		||||
			existedDisk:         compute.Disk{ID: pointer.String("diskid1"), Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags},
 | 
			
		||||
			expectedErr:         false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:                "disk Id and no error shall be returned if everything is good with StandardLRS storage account",
 | 
			
		||||
			diskID:              "diskid1",
 | 
			
		||||
			diskName:            "disk1",
 | 
			
		||||
			storageAccountType:  compute.StandardLRS,
 | 
			
		||||
			diskIOPSReadWrite:   "",
 | 
			
		||||
			diskMBPSReadWrite:   "",
 | 
			
		||||
			diskEncryptionSetID: goodDiskEncryptionSetID,
 | 
			
		||||
			expectedDiskID:      "diskid1",
 | 
			
		||||
			existedDisk:         compute.Disk{ID: pointer.String("diskid1"), Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags},
 | 
			
		||||
			expectedErr:         false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:                "empty diskid and an error shall be returned if everything is good with UltraSSDLRS storage account but DiskIOPSReadWrite is invalid",
 | 
			
		||||
			diskID:              "diskid1",
 | 
			
		||||
			diskName:            "disk1",
 | 
			
		||||
			storageAccountType:  compute.UltraSSDLRS,
 | 
			
		||||
			diskIOPSReadWrite:   "invalid",
 | 
			
		||||
			diskMBPSReadWrite:   "100",
 | 
			
		||||
			diskEncryptionSetID: goodDiskEncryptionSetID,
 | 
			
		||||
			expectedDiskID:      "",
 | 
			
		||||
			existedDisk:         compute.Disk{ID: pointer.String("diskid1"), Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags},
 | 
			
		||||
			expectedErr:         true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("AzureDisk - failed to parse DiskIOPSReadWrite: strconv.Atoi: parsing \"invalid\": invalid syntax"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:                "empty diskid and an error shall be returned if everything is good with UltraSSDLRS storage account but DiskMBPSReadWrite is invalid",
 | 
			
		||||
			diskID:              "diskid1",
 | 
			
		||||
			diskName:            "disk1",
 | 
			
		||||
			storageAccountType:  compute.UltraSSDLRS,
 | 
			
		||||
			diskIOPSReadWrite:   "100",
 | 
			
		||||
			diskMBPSReadWrite:   "invalid",
 | 
			
		||||
			diskEncryptionSetID: goodDiskEncryptionSetID,
 | 
			
		||||
			expectedDiskID:      "",
 | 
			
		||||
			existedDisk:         compute.Disk{ID: pointer.String("diskid1"), Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags},
 | 
			
		||||
			expectedErr:         true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("AzureDisk - failed to parse DiskMBpsReadWrite: strconv.Atoi: parsing \"invalid\": invalid syntax"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:                "empty diskid and an error shall be returned if everything is good with UltraSSDLRS storage account with bad Disk EncryptionSetID",
 | 
			
		||||
			diskID:              "diskid1",
 | 
			
		||||
			diskName:            "disk1",
 | 
			
		||||
			storageAccountType:  compute.UltraSSDLRS,
 | 
			
		||||
			diskIOPSReadWrite:   "100",
 | 
			
		||||
			diskMBPSReadWrite:   "100",
 | 
			
		||||
			diskEncryptionSetID: badDiskEncryptionSetID,
 | 
			
		||||
			expectedDiskID:      "",
 | 
			
		||||
			existedDisk:         compute.Disk{ID: pointer.String("diskid1"), Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags},
 | 
			
		||||
			expectedErr:         true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("AzureDisk - format of DiskEncryptionSetID(%s) is incorrect, correct format: %s", badDiskEncryptionSetID, diskEncryptionSetIDFormat),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:                "disk Id and no error shall be returned if everything is good with StandardLRS storage account with not empty diskIOPSReadWrite",
 | 
			
		||||
			diskID:              "diskid1",
 | 
			
		||||
			diskName:            "disk1",
 | 
			
		||||
			storageAccountType:  compute.StandardLRS,
 | 
			
		||||
			diskIOPSReadWrite:   "100",
 | 
			
		||||
			diskMBPSReadWrite:   "",
 | 
			
		||||
			diskEncryptionSetID: goodDiskEncryptionSetID,
 | 
			
		||||
			expectedDiskID:      "",
 | 
			
		||||
			existedDisk:         compute.Disk{ID: pointer.String("diskid1"), Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags},
 | 
			
		||||
			expectedErr:         true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("AzureDisk - DiskIOPSReadWrite parameter is only applicable in UltraSSD_LRS disk type"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:                "disk Id and no error shall be returned if everything is good with StandardLRS storage account with not empty diskMBPSReadWrite",
 | 
			
		||||
			diskID:              "diskid1",
 | 
			
		||||
			diskName:            "disk1",
 | 
			
		||||
			storageAccountType:  compute.StandardLRS,
 | 
			
		||||
			diskIOPSReadWrite:   "",
 | 
			
		||||
			diskMBPSReadWrite:   "100",
 | 
			
		||||
			diskEncryptionSetID: goodDiskEncryptionSetID,
 | 
			
		||||
			expectedDiskID:      "",
 | 
			
		||||
			existedDisk:         compute.Disk{ID: pointer.String("diskid1"), Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{Encryption: &compute.Encryption{DiskEncryptionSetID: &goodDiskEncryptionSetID, Type: compute.EncryptionAtRestWithCustomerKey}, ProvisioningState: pointer.String("Succeeded")}, Tags: testTags},
 | 
			
		||||
			expectedErr:         true,
 | 
			
		||||
			expectedErrMsg:      fmt.Errorf("AzureDisk - DiskMBpsReadWrite parameter is only applicable in UltraSSD_LRS disk type"),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		managedDiskController := testCloud.ManagedDiskController
 | 
			
		||||
		volumeOptions := &ManagedDiskOptions{
 | 
			
		||||
			DiskName:            test.diskName,
 | 
			
		||||
			StorageAccountType:  test.storageAccountType,
 | 
			
		||||
			ResourceGroup:       "",
 | 
			
		||||
			SizeGB:              1,
 | 
			
		||||
			Tags:                map[string]string{"tag1": "azure-tag1"},
 | 
			
		||||
			AvailabilityZone:    "westus-testzone",
 | 
			
		||||
			DiskIOPSReadWrite:   test.diskIOPSReadWrite,
 | 
			
		||||
			DiskMBpsReadWrite:   test.diskMBPSReadWrite,
 | 
			
		||||
			DiskEncryptionSetID: test.diskEncryptionSetID,
 | 
			
		||||
			MaxShares:           maxShare,
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
		//disk := getTestDisk(test.diskName)
 | 
			
		||||
		mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
		mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		actualDiskID, err := managedDiskController.CreateManagedDisk(volumeOptions)
 | 
			
		||||
		assert.Equal(t, test.expectedDiskID, actualDiskID, "TestCase[%d]: %s", i, test.desc)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected error: %v, return error: %v", i, test.desc, test.expectedErrMsg, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteManagedDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	fakeGetDiskFailed := "fakeGetDiskFailed"
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc           string
 | 
			
		||||
		diskName       string
 | 
			
		||||
		diskState      string
 | 
			
		||||
		existedDisk    compute.Disk
 | 
			
		||||
		expectedErr    bool
 | 
			
		||||
		expectedErrMsg error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:           "an error shall be returned if delete an attaching disk",
 | 
			
		||||
			diskName:       "disk1",
 | 
			
		||||
			diskState:      "attaching",
 | 
			
		||||
			existedDisk:    compute.Disk{Name: pointer.String("disk1")},
 | 
			
		||||
			expectedErr:    true,
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("failed to delete disk(/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1) since it's in attaching or detaching state"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "no error shall be returned if everything is good",
 | 
			
		||||
			diskName:    "disk1",
 | 
			
		||||
			existedDisk: compute.Disk{Name: pointer.String("disk1")},
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:           "an error shall be returned if get disk failed",
 | 
			
		||||
			diskName:       fakeGetDiskFailed,
 | 
			
		||||
			existedDisk:    compute.Disk{Name: pointer.String(fakeGetDiskFailed)},
 | 
			
		||||
			expectedErr:    true,
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Get Disk failed")),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		managedDiskController := testCloud.ManagedDiskController
 | 
			
		||||
		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
 | 
			
		||||
			testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
 | 
			
		||||
		if test.diskState == "attaching" {
 | 
			
		||||
			managedDiskController.common.diskAttachDetachMap.Store(strings.ToLower(diskURI), test.diskState)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
		if test.diskName == fakeGetDiskFailed {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockDisksClient.EXPECT().Delete(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		err := managedDiskController.DeleteManagedDisk(diskURI)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	fakeGetDiskFailed := "fakeGetDiskFailed"
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc                      string
 | 
			
		||||
		diskName                  string
 | 
			
		||||
		existedDisk               compute.Disk
 | 
			
		||||
		expectedErr               bool
 | 
			
		||||
		expectedErrMsg            error
 | 
			
		||||
		expectedProvisioningState string
 | 
			
		||||
		expectedDiskID            string
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:                      "no error shall be returned if get a normal disk without DiskProperties",
 | 
			
		||||
			diskName:                  "disk1",
 | 
			
		||||
			existedDisk:               compute.Disk{Name: pointer.String("disk1")},
 | 
			
		||||
			expectedErr:               false,
 | 
			
		||||
			expectedProvisioningState: "",
 | 
			
		||||
			expectedDiskID:            "",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:                      "an error shall be returned if get disk failed",
 | 
			
		||||
			diskName:                  fakeGetDiskFailed,
 | 
			
		||||
			existedDisk:               compute.Disk{Name: pointer.String(fakeGetDiskFailed)},
 | 
			
		||||
			expectedErr:               true,
 | 
			
		||||
			expectedErrMsg:            fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Get Disk failed")),
 | 
			
		||||
			expectedProvisioningState: "",
 | 
			
		||||
			expectedDiskID:            "",
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		managedDiskController := testCloud.ManagedDiskController
 | 
			
		||||
 | 
			
		||||
		mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
		if test.diskName == fakeGetDiskFailed {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		provisioningState, diskid, err := managedDiskController.GetDisk(testCloud.ResourceGroup, test.diskName)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err)
 | 
			
		||||
		assert.Equal(t, test.expectedProvisioningState, provisioningState, "TestCase[%d]: %s, expected ProvisioningState: %v, return ProvisioningState: %v", i, test.desc, test.expectedProvisioningState, provisioningState)
 | 
			
		||||
		assert.Equal(t, test.expectedDiskID, diskid, "TestCase[%d]: %s, expected DiskID: %v, return DiskID: %v", i, test.desc, test.expectedDiskID, diskid)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestResizeDisk(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	diskSizeGB := int32(2)
 | 
			
		||||
	diskName := "disk1"
 | 
			
		||||
	fakeGetDiskFailed := "fakeGetDiskFailed"
 | 
			
		||||
	fakeCreateDiskFailed := "fakeCreateDiskFailed"
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc             string
 | 
			
		||||
		diskName         string
 | 
			
		||||
		oldSize          resource.Quantity
 | 
			
		||||
		newSize          resource.Quantity
 | 
			
		||||
		existedDisk      compute.Disk
 | 
			
		||||
		expectedQuantity resource.Quantity
 | 
			
		||||
		expectedErr      bool
 | 
			
		||||
		expectedErrMsg   error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "new quantity and no error shall be returned if everything is good",
 | 
			
		||||
			diskName:         diskName,
 | 
			
		||||
			oldSize:          *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			newSize:          *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			existedDisk:      compute.Disk{Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}},
 | 
			
		||||
			expectedQuantity: *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			expectedErr:      false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "new quantity and no error shall be returned if everything is good with DiskProperties is null",
 | 
			
		||||
			diskName:         diskName,
 | 
			
		||||
			oldSize:          *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			newSize:          *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			existedDisk:      compute.Disk{Name: pointer.String("disk1")},
 | 
			
		||||
			expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			expectedErr:      true,
 | 
			
		||||
			expectedErrMsg:   fmt.Errorf("DiskProperties of disk(%s) is nil", diskName),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "new quantity and no error shall be returned if everything is good with disk already of greater or equal size than requested",
 | 
			
		||||
			diskName:         diskName,
 | 
			
		||||
			oldSize:          *resource.NewQuantity(1*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			newSize:          *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			existedDisk:      compute.Disk{Name: pointer.String("disk1"), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}},
 | 
			
		||||
			expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			expectedErr:      false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "an error shall be returned if everything is good but get disk failed",
 | 
			
		||||
			diskName:         fakeGetDiskFailed,
 | 
			
		||||
			oldSize:          *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			newSize:          *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			existedDisk:      compute.Disk{Name: pointer.String(fakeGetDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}},
 | 
			
		||||
			expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			expectedErr:      true,
 | 
			
		||||
			expectedErrMsg:   fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Get Disk failed")),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "an error shall be returned if everything is good but create disk failed",
 | 
			
		||||
			diskName:         fakeCreateDiskFailed,
 | 
			
		||||
			oldSize:          *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			newSize:          *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			existedDisk:      compute.Disk{Name: pointer.String(fakeCreateDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Unattached}},
 | 
			
		||||
			expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			expectedErr:      true,
 | 
			
		||||
			expectedErrMsg:   fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Create Disk failed")),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:             "an error shall be returned if disk is not in Unattached state",
 | 
			
		||||
			diskName:         fakeCreateDiskFailed,
 | 
			
		||||
			oldSize:          *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			newSize:          *resource.NewQuantity(3*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			existedDisk:      compute.Disk{Name: pointer.String(fakeCreateDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB, DiskState: compute.Attached}},
 | 
			
		||||
			expectedQuantity: *resource.NewQuantity(2*(1024*1024*1024), resource.BinarySI),
 | 
			
		||||
			expectedErr:      true,
 | 
			
		||||
			expectedErrMsg:   fmt.Errorf("azureDisk - disk resize is only supported on Unattached disk, current disk state: Attached, already attached to "),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		managedDiskController := testCloud.ManagedDiskController
 | 
			
		||||
		diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
 | 
			
		||||
			testCloud.SubscriptionID, testCloud.ResourceGroup, *test.existedDisk.Name)
 | 
			
		||||
 | 
			
		||||
		mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
		if test.diskName == fakeGetDiskFailed {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		if test.diskName == fakeCreateDiskFailed {
 | 
			
		||||
			mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(&retry.Error{RawError: fmt.Errorf("Create Disk failed")}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockDisksClient.EXPECT().Update(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		result, err := managedDiskController.ResizeDisk(diskURI, test.oldSize, test.newSize)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err)
 | 
			
		||||
		assert.Equal(t, test.expectedQuantity.Value(), result.Value(), "TestCase[%d]: %s, expected Quantity: %v, return Quantity: %v", i, test.desc, test.expectedQuantity, result)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetLabelsForVolume(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	testCloud0 := GetTestCloud(ctrl)
 | 
			
		||||
	diskName := "disk1"
 | 
			
		||||
	diskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
 | 
			
		||||
		testCloud0.SubscriptionID, testCloud0.ResourceGroup, diskName)
 | 
			
		||||
	diskSizeGB := int32(30)
 | 
			
		||||
	fakeGetDiskFailed := "fakeGetDiskFailed"
 | 
			
		||||
	fakeGetDiskFailedDiskURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks/%s",
 | 
			
		||||
		testCloud0.SubscriptionID, testCloud0.ResourceGroup, fakeGetDiskFailed)
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc           string
 | 
			
		||||
		diskName       string
 | 
			
		||||
		pv             *v1.PersistentVolume
 | 
			
		||||
		existedDisk    compute.Disk
 | 
			
		||||
		expected       map[string]string
 | 
			
		||||
		expectedErr    bool
 | 
			
		||||
		expectedErrMsg error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "labels and no error shall be returned if everything is good",
 | 
			
		||||
			diskName: diskName,
 | 
			
		||||
			pv: &v1.PersistentVolume{
 | 
			
		||||
				Spec: v1.PersistentVolumeSpec{
 | 
			
		||||
					PersistentVolumeSource: v1.PersistentVolumeSource{
 | 
			
		||||
						AzureDisk: &v1.AzureDiskVolumeSource{
 | 
			
		||||
							DiskName:    diskName,
 | 
			
		||||
							DataDiskURI: diskURI,
 | 
			
		||||
						},
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			existedDisk: compute.Disk{Name: pointer.String(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"1"}},
 | 
			
		||||
			expected: map[string]string{
 | 
			
		||||
				v1.LabelTopologyRegion: testCloud0.Location,
 | 
			
		||||
				v1.LabelTopologyZone:   testCloud0.makeZone(testCloud0.Location, 1),
 | 
			
		||||
			},
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "an error shall be returned if everything is good with invalid zone",
 | 
			
		||||
			diskName: diskName,
 | 
			
		||||
			pv: &v1.PersistentVolume{
 | 
			
		||||
				Spec: v1.PersistentVolumeSpec{
 | 
			
		||||
					PersistentVolumeSource: v1.PersistentVolumeSource{
 | 
			
		||||
						AzureDisk: &v1.AzureDiskVolumeSource{
 | 
			
		||||
							DiskName:    diskName,
 | 
			
		||||
							DataDiskURI: diskURI,
 | 
			
		||||
						},
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			existedDisk:    compute.Disk{Name: pointer.String(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"invalid"}},
 | 
			
		||||
			expectedErr:    true,
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("failed to parse zone [invalid] for AzureDisk %v: %v", diskName, "strconv.Atoi: parsing \"invalid\": invalid syntax"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "nil shall be returned if everything is good with null Zones",
 | 
			
		||||
			diskName: diskName,
 | 
			
		||||
			pv: &v1.PersistentVolume{
 | 
			
		||||
				Spec: v1.PersistentVolumeSpec{
 | 
			
		||||
					PersistentVolumeSource: v1.PersistentVolumeSource{
 | 
			
		||||
						AzureDisk: &v1.AzureDiskVolumeSource{
 | 
			
		||||
							DiskName:    diskName,
 | 
			
		||||
							DataDiskURI: diskURI,
 | 
			
		||||
						},
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			existedDisk: compute.Disk{Name: pointer.String(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}},
 | 
			
		||||
			expected: map[string]string{
 | 
			
		||||
				v1.LabelTopologyRegion: testCloud0.Location,
 | 
			
		||||
			},
 | 
			
		||||
			expectedErr:    false,
 | 
			
		||||
			expectedErrMsg: nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "an error shall be returned if everything is good with get disk failed",
 | 
			
		||||
			diskName: fakeGetDiskFailed,
 | 
			
		||||
			pv: &v1.PersistentVolume{
 | 
			
		||||
				Spec: v1.PersistentVolumeSpec{
 | 
			
		||||
					PersistentVolumeSource: v1.PersistentVolumeSource{
 | 
			
		||||
						AzureDisk: &v1.AzureDiskVolumeSource{
 | 
			
		||||
							DiskName:    fakeGetDiskFailed,
 | 
			
		||||
							DataDiskURI: fakeGetDiskFailedDiskURI,
 | 
			
		||||
						},
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			existedDisk:    compute.Disk{Name: pointer.String(fakeGetDiskFailed), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"1"}},
 | 
			
		||||
			expectedErr:    true,
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 0, RawError: %w", fmt.Errorf("Get Disk failed")),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "an error shall be returned if everything is good with invalid DiskURI",
 | 
			
		||||
			diskName: diskName,
 | 
			
		||||
			pv: &v1.PersistentVolume{
 | 
			
		||||
				Spec: v1.PersistentVolumeSpec{
 | 
			
		||||
					PersistentVolumeSource: v1.PersistentVolumeSource{
 | 
			
		||||
						AzureDisk: &v1.AzureDiskVolumeSource{
 | 
			
		||||
							DiskName:    diskName,
 | 
			
		||||
							DataDiskURI: "invalidDiskURI",
 | 
			
		||||
						},
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			existedDisk:    compute.Disk{Name: pointer.String(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}, Zones: &[]string{"1"}},
 | 
			
		||||
			expectedErr:    true,
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("invalid disk URI: invalidDiskURI"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "nil shall be returned if everything is good but pv.Spec.AzureDisk.DiskName is cloudvolume.ProvisionedVolumeName",
 | 
			
		||||
			diskName: diskName,
 | 
			
		||||
			pv: &v1.PersistentVolume{
 | 
			
		||||
				Spec: v1.PersistentVolumeSpec{
 | 
			
		||||
					PersistentVolumeSource: v1.PersistentVolumeSource{
 | 
			
		||||
						AzureDisk: &v1.AzureDiskVolumeSource{
 | 
			
		||||
							DiskName:    cloudvolume.ProvisionedVolumeName,
 | 
			
		||||
							DataDiskURI: diskURI,
 | 
			
		||||
						},
 | 
			
		||||
					},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			existedDisk: compute.Disk{Name: pointer.String(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}},
 | 
			
		||||
			expected:    nil,
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:     "nil shall be returned if everything is good but pv.Spec.AzureDisk is nil",
 | 
			
		||||
			diskName: diskName,
 | 
			
		||||
			pv: &v1.PersistentVolume{
 | 
			
		||||
				Spec: v1.PersistentVolumeSpec{
 | 
			
		||||
					PersistentVolumeSource: v1.PersistentVolumeSource{},
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
			existedDisk: compute.Disk{Name: pointer.String(diskName), DiskProperties: &compute.DiskProperties{DiskSizeGB: &diskSizeGB}},
 | 
			
		||||
			expected:    nil,
 | 
			
		||||
			expectedErr: false,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range testCases {
 | 
			
		||||
		testCloud := GetTestCloud(ctrl)
 | 
			
		||||
		mockDisksClient := testCloud.DisksClient.(*mockdiskclient.MockInterface)
 | 
			
		||||
		if test.diskName == fakeGetDiskFailed {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, &retry.Error{RawError: fmt.Errorf("Get Disk failed")}).AnyTimes()
 | 
			
		||||
		} else {
 | 
			
		||||
			mockDisksClient.EXPECT().Get(gomock.Any(), testCloud.ResourceGroup, test.diskName).Return(test.existedDisk, nil).AnyTimes()
 | 
			
		||||
		}
 | 
			
		||||
		mockDisksClient.EXPECT().CreateOrUpdate(gomock.Any(), testCloud.ResourceGroup, test.diskName, gomock.Any()).Return(nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
		result, err := testCloud.GetLabelsForVolume(context.TODO(), test.pv)
 | 
			
		||||
		assert.Equal(t, test.expected, result, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expected, result)
 | 
			
		||||
		assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -1,110 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// CloudProviderRateLimitConfig indicates the rate limit config for each clients.
 | 
			
		||||
type CloudProviderRateLimitConfig struct {
 | 
			
		||||
	// The default rate limit config options.
 | 
			
		||||
	azclients.RateLimitConfig
 | 
			
		||||
 | 
			
		||||
	// Rate limit config for each clients. Values would override default settings above.
 | 
			
		||||
	RouteRateLimit                  *azclients.RateLimitConfig `json:"routeRateLimit,omitempty" yaml:"routeRateLimit,omitempty"`
 | 
			
		||||
	SubnetsRateLimit                *azclients.RateLimitConfig `json:"subnetsRateLimit,omitempty" yaml:"subnetsRateLimit,omitempty"`
 | 
			
		||||
	InterfaceRateLimit              *azclients.RateLimitConfig `json:"interfaceRateLimit,omitempty" yaml:"interfaceRateLimit,omitempty"`
 | 
			
		||||
	RouteTableRateLimit             *azclients.RateLimitConfig `json:"routeTableRateLimit,omitempty" yaml:"routeTableRateLimit,omitempty"`
 | 
			
		||||
	LoadBalancerRateLimit           *azclients.RateLimitConfig `json:"loadBalancerRateLimit,omitempty" yaml:"loadBalancerRateLimit,omitempty"`
 | 
			
		||||
	PublicIPAddressRateLimit        *azclients.RateLimitConfig `json:"publicIPAddressRateLimit,omitempty" yaml:"publicIPAddressRateLimit,omitempty"`
 | 
			
		||||
	SecurityGroupRateLimit          *azclients.RateLimitConfig `json:"securityGroupRateLimit,omitempty" yaml:"securityGroupRateLimit,omitempty"`
 | 
			
		||||
	VirtualMachineRateLimit         *azclients.RateLimitConfig `json:"virtualMachineRateLimit,omitempty" yaml:"virtualMachineRateLimit,omitempty"`
 | 
			
		||||
	StorageAccountRateLimit         *azclients.RateLimitConfig `json:"storageAccountRateLimit,omitempty" yaml:"storageAccountRateLimit,omitempty"`
 | 
			
		||||
	DiskRateLimit                   *azclients.RateLimitConfig `json:"diskRateLimit,omitempty" yaml:"diskRateLimit,omitempty"`
 | 
			
		||||
	SnapshotRateLimit               *azclients.RateLimitConfig `json:"snapshotRateLimit,omitempty" yaml:"snapshotRateLimit,omitempty"`
 | 
			
		||||
	VirtualMachineScaleSetRateLimit *azclients.RateLimitConfig `json:"virtualMachineScaleSetRateLimit,omitempty" yaml:"virtualMachineScaleSetRateLimit,omitempty"`
 | 
			
		||||
	VirtualMachineSizeRateLimit     *azclients.RateLimitConfig `json:"virtualMachineSizesRateLimit,omitempty" yaml:"virtualMachineSizesRateLimit,omitempty"`
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// InitializeCloudProviderRateLimitConfig initializes rate limit configs.
 | 
			
		||||
func InitializeCloudProviderRateLimitConfig(config *CloudProviderRateLimitConfig) {
 | 
			
		||||
	if config == nil {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Assign read rate limit defaults if no configuration was passed in.
 | 
			
		||||
	if config.CloudProviderRateLimitQPS == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitQPS = rateLimitQPSDefault
 | 
			
		||||
	}
 | 
			
		||||
	if config.CloudProviderRateLimitBucket == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitBucket = rateLimitBucketDefault
 | 
			
		||||
	}
 | 
			
		||||
	// Assign write rate limit defaults if no configuration was passed in.
 | 
			
		||||
	if config.CloudProviderRateLimitQPSWrite == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitQPSWrite = config.CloudProviderRateLimitQPS
 | 
			
		||||
	}
 | 
			
		||||
	if config.CloudProviderRateLimitBucketWrite == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitBucketWrite = config.CloudProviderRateLimitBucket
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	config.RouteRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.RouteRateLimit)
 | 
			
		||||
	config.SubnetsRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.SubnetsRateLimit)
 | 
			
		||||
	config.InterfaceRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.InterfaceRateLimit)
 | 
			
		||||
	config.RouteTableRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.RouteTableRateLimit)
 | 
			
		||||
	config.LoadBalancerRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.LoadBalancerRateLimit)
 | 
			
		||||
	config.PublicIPAddressRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.PublicIPAddressRateLimit)
 | 
			
		||||
	config.SecurityGroupRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.SecurityGroupRateLimit)
 | 
			
		||||
	config.VirtualMachineRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineRateLimit)
 | 
			
		||||
	config.StorageAccountRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.StorageAccountRateLimit)
 | 
			
		||||
	config.DiskRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.DiskRateLimit)
 | 
			
		||||
	config.SnapshotRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.SnapshotRateLimit)
 | 
			
		||||
	config.VirtualMachineScaleSetRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineScaleSetRateLimit)
 | 
			
		||||
	config.VirtualMachineSizeRateLimit = overrideDefaultRateLimitConfig(&config.RateLimitConfig, config.VirtualMachineSizeRateLimit)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// overrideDefaultRateLimitConfig overrides the default CloudProviderRateLimitConfig.
 | 
			
		||||
func overrideDefaultRateLimitConfig(defaults, config *azclients.RateLimitConfig) *azclients.RateLimitConfig {
 | 
			
		||||
	// If config not set, apply defaults.
 | 
			
		||||
	if config == nil {
 | 
			
		||||
		return defaults
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Remain disabled if it's set explicitly.
 | 
			
		||||
	if !config.CloudProviderRateLimit {
 | 
			
		||||
		return &azclients.RateLimitConfig{CloudProviderRateLimit: false}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Apply default values.
 | 
			
		||||
	if config.CloudProviderRateLimitQPS == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitQPS = defaults.CloudProviderRateLimitQPS
 | 
			
		||||
	}
 | 
			
		||||
	if config.CloudProviderRateLimitBucket == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitBucket = defaults.CloudProviderRateLimitBucket
 | 
			
		||||
	}
 | 
			
		||||
	if config.CloudProviderRateLimitQPSWrite == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitQPSWrite = defaults.CloudProviderRateLimitQPSWrite
 | 
			
		||||
	}
 | 
			
		||||
	if config.CloudProviderRateLimitBucketWrite == 0 {
 | 
			
		||||
		config.CloudProviderRateLimitBucketWrite = defaults.CloudProviderRateLimitBucketWrite
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return config
 | 
			
		||||
}
 | 
			
		||||
@@ -1,180 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/auth"
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	testAzureConfig = `{
 | 
			
		||||
		"aadClientCertPassword": "aadClientCertPassword",
 | 
			
		||||
		"aadClientCertPath": "aadClientCertPath",
 | 
			
		||||
		"aadClientId": "aadClientId",
 | 
			
		||||
		"aadClientSecret": "aadClientSecret",
 | 
			
		||||
		"cloud":"AzurePublicCloud",
 | 
			
		||||
		"cloudProviderBackoff": true,
 | 
			
		||||
		"cloudProviderBackoffDuration": 1,
 | 
			
		||||
		"cloudProviderBackoffExponent": 1,
 | 
			
		||||
		"cloudProviderBackoffJitter": 1,
 | 
			
		||||
		"cloudProviderBackoffRetries": 1,
 | 
			
		||||
		"cloudProviderRatelimit": true,
 | 
			
		||||
		"cloudProviderRateLimitBucket": 1,
 | 
			
		||||
		"cloudProviderRateLimitBucketWrite": 1,
 | 
			
		||||
		"cloudProviderRateLimitQPS": 1,
 | 
			
		||||
		"cloudProviderRateLimitQPSWrite": 1,
 | 
			
		||||
		"virtualMachineScaleSetRateLimit": {
 | 
			
		||||
			"cloudProviderRatelimit": true,
 | 
			
		||||
			"cloudProviderRateLimitBucket": 2,
 | 
			
		||||
			"CloudProviderRateLimitBucketWrite": 2,
 | 
			
		||||
			"cloudProviderRateLimitQPS": 0,
 | 
			
		||||
			"CloudProviderRateLimitQPSWrite": 0
 | 
			
		||||
		},
 | 
			
		||||
		"loadBalancerRateLimit": {
 | 
			
		||||
			"cloudProviderRatelimit": false,
 | 
			
		||||
		},
 | 
			
		||||
		"networkResourceTenantId": "networkResourceTenantId",
 | 
			
		||||
		"networkResourceSubscriptionId": "networkResourceSubscriptionId",
 | 
			
		||||
		"availabilitySetNodesCacheTTLInSeconds": 100,
 | 
			
		||||
		"vmssCacheTTLInSeconds": 100,
 | 
			
		||||
		"vmssVirtualMachinesCacheTTLInSeconds": 100,
 | 
			
		||||
		"vmCacheTTLInSeconds": 100,
 | 
			
		||||
		"loadBalancerCacheTTLInSeconds": 100,
 | 
			
		||||
		"nsgCacheTTLInSeconds": 100,
 | 
			
		||||
		"routeTableCacheTTLInSeconds": 100,
 | 
			
		||||
		"location": "location",
 | 
			
		||||
		"maximumLoadBalancerRuleCount": 1,
 | 
			
		||||
		"primaryAvailabilitySetName": "primaryAvailabilitySetName",
 | 
			
		||||
		"primaryScaleSetName": "primaryScaleSetName",
 | 
			
		||||
		"resourceGroup": "resourceGroup",
 | 
			
		||||
		"routeTableName": "routeTableName",
 | 
			
		||||
		"routeTableResourceGroup": "routeTableResourceGroup",
 | 
			
		||||
		"securityGroupName": "securityGroupName",
 | 
			
		||||
		"securityGroupResourceGroup": "securityGroupResourceGroup",
 | 
			
		||||
		"subnetName": "subnetName",
 | 
			
		||||
		"subscriptionId": "subscriptionId",
 | 
			
		||||
		"tenantId": "tenantId",
 | 
			
		||||
		"useInstanceMetadata": true,
 | 
			
		||||
		"useManagedIdentityExtension": true,
 | 
			
		||||
		"vnetName": "vnetName",
 | 
			
		||||
		"vnetResourceGroup": "vnetResourceGroup",
 | 
			
		||||
		vmType: "standard"
 | 
			
		||||
	}`
 | 
			
		||||
 | 
			
		||||
	testDefaultRateLimitConfig = azclients.RateLimitConfig{
 | 
			
		||||
		CloudProviderRateLimit:            true,
 | 
			
		||||
		CloudProviderRateLimitBucket:      1,
 | 
			
		||||
		CloudProviderRateLimitBucketWrite: 1,
 | 
			
		||||
		CloudProviderRateLimitQPS:         1,
 | 
			
		||||
		CloudProviderRateLimitQPSWrite:    1,
 | 
			
		||||
	}
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestParseConfig(t *testing.T) {
 | 
			
		||||
	expected := &Config{
 | 
			
		||||
		AzureAuthConfig: auth.AzureAuthConfig{
 | 
			
		||||
			AADClientCertPassword:         "aadClientCertPassword",
 | 
			
		||||
			AADClientCertPath:             "aadClientCertPath",
 | 
			
		||||
			AADClientID:                   "aadClientId",
 | 
			
		||||
			AADClientSecret:               "aadClientSecret",
 | 
			
		||||
			Cloud:                         "AzurePublicCloud",
 | 
			
		||||
			SubscriptionID:                "subscriptionId",
 | 
			
		||||
			TenantID:                      "tenantId",
 | 
			
		||||
			UseManagedIdentityExtension:   true,
 | 
			
		||||
			NetworkResourceTenantID:       "networkResourceTenantId",
 | 
			
		||||
			NetworkResourceSubscriptionID: "networkResourceSubscriptionId",
 | 
			
		||||
		},
 | 
			
		||||
		CloudProviderBackoff:         true,
 | 
			
		||||
		CloudProviderBackoffDuration: 1,
 | 
			
		||||
		CloudProviderBackoffExponent: 1,
 | 
			
		||||
		CloudProviderBackoffJitter:   1,
 | 
			
		||||
		CloudProviderBackoffRetries:  1,
 | 
			
		||||
		CloudProviderRateLimitConfig: CloudProviderRateLimitConfig{
 | 
			
		||||
			RateLimitConfig: testDefaultRateLimitConfig,
 | 
			
		||||
			LoadBalancerRateLimit: &azclients.RateLimitConfig{
 | 
			
		||||
				CloudProviderRateLimit: false,
 | 
			
		||||
			},
 | 
			
		||||
			VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{
 | 
			
		||||
				CloudProviderRateLimit:            true,
 | 
			
		||||
				CloudProviderRateLimitBucket:      2,
 | 
			
		||||
				CloudProviderRateLimitBucketWrite: 2,
 | 
			
		||||
			},
 | 
			
		||||
		},
 | 
			
		||||
		AvailabilitySetNodesCacheTTLInSeconds: 100,
 | 
			
		||||
		VmssCacheTTLInSeconds:                 100,
 | 
			
		||||
		VmssVirtualMachinesCacheTTLInSeconds:  100,
 | 
			
		||||
		VMCacheTTLInSeconds:                   100,
 | 
			
		||||
		LoadBalancerCacheTTLInSeconds:         100,
 | 
			
		||||
		NsgCacheTTLInSeconds:                  100,
 | 
			
		||||
		RouteTableCacheTTLInSeconds:           100,
 | 
			
		||||
		Location:                              "location",
 | 
			
		||||
		MaximumLoadBalancerRuleCount:          1,
 | 
			
		||||
		PrimaryAvailabilitySetName:            "primaryAvailabilitySetName",
 | 
			
		||||
		PrimaryScaleSetName:                   "primaryScaleSetName",
 | 
			
		||||
		ResourceGroup:                         "resourcegroup",
 | 
			
		||||
		RouteTableName:                        "routeTableName",
 | 
			
		||||
		RouteTableResourceGroup:               "routeTableResourceGroup",
 | 
			
		||||
		SecurityGroupName:                     "securityGroupName",
 | 
			
		||||
		SecurityGroupResourceGroup:            "securityGroupResourceGroup",
 | 
			
		||||
		SubnetName:                            "subnetName",
 | 
			
		||||
		UseInstanceMetadata:                   true,
 | 
			
		||||
		VMType:                                "standard",
 | 
			
		||||
		VnetName:                              "vnetName",
 | 
			
		||||
		VnetResourceGroup:                     "vnetResourceGroup",
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	buffer := bytes.NewBufferString(testAzureConfig)
 | 
			
		||||
	config, err := parseConfig(buffer)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, expected, config)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestInitializeCloudProviderRateLimitConfig(t *testing.T) {
 | 
			
		||||
	buffer := bytes.NewBufferString(testAzureConfig)
 | 
			
		||||
	config, err := parseConfig(buffer)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	InitializeCloudProviderRateLimitConfig(&config.CloudProviderRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.LoadBalancerRateLimit, &azclients.RateLimitConfig{
 | 
			
		||||
		CloudProviderRateLimit: false,
 | 
			
		||||
	})
 | 
			
		||||
	assert.Equal(t, config.VirtualMachineScaleSetRateLimit, &azclients.RateLimitConfig{
 | 
			
		||||
		CloudProviderRateLimit:            true,
 | 
			
		||||
		CloudProviderRateLimitBucket:      2,
 | 
			
		||||
		CloudProviderRateLimitBucketWrite: 2,
 | 
			
		||||
		CloudProviderRateLimitQPS:         1,
 | 
			
		||||
		CloudProviderRateLimitQPSWrite:    1,
 | 
			
		||||
	})
 | 
			
		||||
	assert.Equal(t, config.VirtualMachineSizeRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.VirtualMachineRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.RouteRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.SubnetsRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.InterfaceRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.RouteTableRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.SecurityGroupRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.StorageAccountRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.DiskRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
	assert.Equal(t, config.SnapshotRateLimit, &testDefaultRateLimitConfig)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,579 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/metrics"
 | 
			
		||||
	utilnet "k8s.io/utils/net"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	// routeUpdateInterval defines the route reconciling interval.
 | 
			
		||||
	routeUpdateInterval = 30 * time.Second
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// routeOperation defines the allowed operations for route updating.
 | 
			
		||||
type routeOperation string
 | 
			
		||||
 | 
			
		||||
// copied to minimize the number of cross reference
 | 
			
		||||
// and exceptions in publishing and allowed imports.
 | 
			
		||||
const (
 | 
			
		||||
	routeNameFmt       = "%s____%s"
 | 
			
		||||
	routeNameSeparator = "____"
 | 
			
		||||
 | 
			
		||||
	// Route operations.
 | 
			
		||||
	routeOperationAdd             routeOperation = "add"
 | 
			
		||||
	routeOperationDelete          routeOperation = "delete"
 | 
			
		||||
	routeTableOperationUpdateTags routeOperation = "updateRouteTableTags"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// delayedRouteOperation defines a delayed route operation which is used in delayedRouteUpdater.
 | 
			
		||||
type delayedRouteOperation struct {
 | 
			
		||||
	route          network.Route
 | 
			
		||||
	routeTableTags map[string]*string
 | 
			
		||||
	operation      routeOperation
 | 
			
		||||
	result         chan error
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// wait waits for the operation completion and returns the result.
 | 
			
		||||
func (op *delayedRouteOperation) wait() error {
 | 
			
		||||
	return <-op.result
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// delayedRouteUpdater defines a delayed route updater, which batches all the
 | 
			
		||||
// route updating operations within "interval" period.
 | 
			
		||||
// Example usage:
 | 
			
		||||
//
 | 
			
		||||
//	op, err := updater.addRouteOperation(routeOperationAdd, route)
 | 
			
		||||
//	err = op.wait()
 | 
			
		||||
type delayedRouteUpdater struct {
 | 
			
		||||
	az       *Cloud
 | 
			
		||||
	interval time.Duration
 | 
			
		||||
 | 
			
		||||
	lock           sync.Mutex
 | 
			
		||||
	routesToUpdate []*delayedRouteOperation
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// newDelayedRouteUpdater creates a new delayedRouteUpdater.
 | 
			
		||||
func newDelayedRouteUpdater(az *Cloud, interval time.Duration) *delayedRouteUpdater {
 | 
			
		||||
	return &delayedRouteUpdater{
 | 
			
		||||
		az:             az,
 | 
			
		||||
		interval:       interval,
 | 
			
		||||
		routesToUpdate: make([]*delayedRouteOperation, 0),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// run starts the updater reconciling loop.
 | 
			
		||||
func (d *delayedRouteUpdater) run() {
 | 
			
		||||
	err := wait.PollImmediateInfinite(d.interval, func() (bool, error) {
 | 
			
		||||
		d.updateRoutes()
 | 
			
		||||
		return false, nil
 | 
			
		||||
	})
 | 
			
		||||
	if err != nil { // this should never happen, if it does, panic
 | 
			
		||||
		panic(err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// updateRoutes invokes route table client to update all routes.
 | 
			
		||||
func (d *delayedRouteUpdater) updateRoutes() {
 | 
			
		||||
	d.lock.Lock()
 | 
			
		||||
	defer d.lock.Unlock()
 | 
			
		||||
 | 
			
		||||
	// No need to do any updating.
 | 
			
		||||
	if len(d.routesToUpdate) == 0 {
 | 
			
		||||
		klog.V(6).Info("updateRoutes: nothing to update, returning")
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var err error
 | 
			
		||||
	defer func() {
 | 
			
		||||
		// Notify all the goroutines.
 | 
			
		||||
		for _, rt := range d.routesToUpdate {
 | 
			
		||||
			rt.result <- err
 | 
			
		||||
		}
 | 
			
		||||
		// Clear all the jobs.
 | 
			
		||||
		d.routesToUpdate = make([]*delayedRouteOperation, 0)
 | 
			
		||||
	}()
 | 
			
		||||
 | 
			
		||||
	var (
 | 
			
		||||
		routeTable       network.RouteTable
 | 
			
		||||
		existsRouteTable bool
 | 
			
		||||
	)
 | 
			
		||||
	routeTable, existsRouteTable, err = d.az.getRouteTable(azcache.CacheReadTypeDefault)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.Errorf("getRouteTable() failed with error: %v", err)
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// create route table if it doesn't exists yet.
 | 
			
		||||
	if !existsRouteTable {
 | 
			
		||||
		err = d.az.createRouteTable()
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.Errorf("createRouteTable() failed with error: %v", err)
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		routeTable, _, err = d.az.getRouteTable(azcache.CacheReadTypeDefault)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.Errorf("getRouteTable() failed with error: %v", err)
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// reconcile routes.
 | 
			
		||||
	dirty, onlyUpdateTags := false, true
 | 
			
		||||
	routes := []network.Route{}
 | 
			
		||||
	if routeTable.Routes != nil {
 | 
			
		||||
		routes = *routeTable.Routes
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	routes, dirty = d.cleanupOutdatedRoutes(routes)
 | 
			
		||||
	if dirty {
 | 
			
		||||
		onlyUpdateTags = false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, rt := range d.routesToUpdate {
 | 
			
		||||
		if rt.operation == routeTableOperationUpdateTags {
 | 
			
		||||
			routeTable.Tags = rt.routeTableTags
 | 
			
		||||
			dirty = true
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		routeMatch := false
 | 
			
		||||
		onlyUpdateTags = false
 | 
			
		||||
		for i, existingRoute := range routes {
 | 
			
		||||
			if strings.EqualFold(pointer.StringDeref(existingRoute.Name, ""), pointer.StringDeref(rt.route.Name, "")) {
 | 
			
		||||
				// delete the name-matched routes here (missing routes would be added later if the operation is add).
 | 
			
		||||
				routes = append(routes[:i], routes[i+1:]...)
 | 
			
		||||
				if existingRoute.RoutePropertiesFormat != nil &&
 | 
			
		||||
					rt.route.RoutePropertiesFormat != nil &&
 | 
			
		||||
					strings.EqualFold(pointer.StringDeref(existingRoute.AddressPrefix, ""), pointer.StringDeref(rt.route.AddressPrefix, "")) &&
 | 
			
		||||
					strings.EqualFold(pointer.StringDeref(existingRoute.NextHopIPAddress, ""), pointer.StringDeref(rt.route.NextHopIPAddress, "")) {
 | 
			
		||||
					routeMatch = true
 | 
			
		||||
				}
 | 
			
		||||
				if rt.operation == routeOperationDelete {
 | 
			
		||||
					dirty = true
 | 
			
		||||
				}
 | 
			
		||||
				break
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Add missing routes if the operation is add.
 | 
			
		||||
		if rt.operation == routeOperationAdd {
 | 
			
		||||
			routes = append(routes, rt.route)
 | 
			
		||||
			if !routeMatch {
 | 
			
		||||
				dirty = true
 | 
			
		||||
			}
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if dirty {
 | 
			
		||||
		if !onlyUpdateTags {
 | 
			
		||||
			klog.V(2).Infof("updateRoutes: updating routes")
 | 
			
		||||
			routeTable.Routes = &routes
 | 
			
		||||
		}
 | 
			
		||||
		err = d.az.CreateOrUpdateRouteTable(routeTable)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.Errorf("CreateOrUpdateRouteTable() failed with error: %v", err)
 | 
			
		||||
			return
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// cleanupOutdatedRoutes deletes all non-dualstack routes when dualstack is enabled,
 | 
			
		||||
// and deletes all dualstack routes when dualstack is not enabled.
 | 
			
		||||
func (d *delayedRouteUpdater) cleanupOutdatedRoutes(existingRoutes []network.Route) (routes []network.Route, changed bool) {
 | 
			
		||||
	for i := len(existingRoutes) - 1; i >= 0; i-- {
 | 
			
		||||
		existingRouteName := pointer.StringDeref(existingRoutes[i].Name, "")
 | 
			
		||||
		split := strings.Split(existingRouteName, routeNameSeparator)
 | 
			
		||||
 | 
			
		||||
		klog.V(4).Infof("cleanupOutdatedRoutes: checking route %s", existingRouteName)
 | 
			
		||||
 | 
			
		||||
		// filter out unmanaged routes
 | 
			
		||||
		deleteRoute := false
 | 
			
		||||
		if d.az.nodeNames.Has(split[0]) {
 | 
			
		||||
			if d.az.ipv6DualStackEnabled && len(split) == 1 {
 | 
			
		||||
				klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated non-dualstack route %s", existingRouteName)
 | 
			
		||||
				deleteRoute = true
 | 
			
		||||
			} else if !d.az.ipv6DualStackEnabled && len(split) == 2 {
 | 
			
		||||
				klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated dualstack route %s", existingRouteName)
 | 
			
		||||
				deleteRoute = true
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if deleteRoute {
 | 
			
		||||
				existingRoutes = append(existingRoutes[:i], existingRoutes[i+1:]...)
 | 
			
		||||
				changed = true
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return existingRoutes, changed
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addRouteOperation adds the routeOperation to delayedRouteUpdater and returns a delayedRouteOperation.
 | 
			
		||||
func (d *delayedRouteUpdater) addRouteOperation(operation routeOperation, route network.Route) (*delayedRouteOperation, error) {
 | 
			
		||||
	d.lock.Lock()
 | 
			
		||||
	defer d.lock.Unlock()
 | 
			
		||||
 | 
			
		||||
	op := &delayedRouteOperation{
 | 
			
		||||
		route:     route,
 | 
			
		||||
		operation: operation,
 | 
			
		||||
		result:    make(chan error),
 | 
			
		||||
	}
 | 
			
		||||
	d.routesToUpdate = append(d.routesToUpdate, op)
 | 
			
		||||
	return op, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// addUpdateRouteTableTagsOperation adds a update route table tags operation to delayedRouteUpdater and returns a delayedRouteOperation.
 | 
			
		||||
func (d *delayedRouteUpdater) addUpdateRouteTableTagsOperation(operation routeOperation, tags map[string]*string) (*delayedRouteOperation, error) {
 | 
			
		||||
	d.lock.Lock()
 | 
			
		||||
	defer d.lock.Unlock()
 | 
			
		||||
 | 
			
		||||
	op := &delayedRouteOperation{
 | 
			
		||||
		routeTableTags: tags,
 | 
			
		||||
		operation:      operation,
 | 
			
		||||
		result:         make(chan error),
 | 
			
		||||
	}
 | 
			
		||||
	d.routesToUpdate = append(d.routesToUpdate, op)
 | 
			
		||||
	return op, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ListRoutes lists all managed routes that belong to the specified clusterName
 | 
			
		||||
func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) {
 | 
			
		||||
	klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName)
 | 
			
		||||
	routeTable, existsRouteTable, err := az.getRouteTable(azcache.CacheReadTypeDefault)
 | 
			
		||||
	routes, err := processRoutes(az.ipv6DualStackEnabled, routeTable, existsRouteTable, err)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Compose routes for unmanaged routes so that node controller won't retry creating routes for them.
 | 
			
		||||
	unmanagedNodes, err := az.GetUnmanagedNodes()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	az.routeCIDRsLock.Lock()
 | 
			
		||||
	defer az.routeCIDRsLock.Unlock()
 | 
			
		||||
	for _, nodeName := range unmanagedNodes.List() {
 | 
			
		||||
		if cidr, ok := az.routeCIDRs[nodeName]; ok {
 | 
			
		||||
			routes = append(routes, &cloudprovider.Route{
 | 
			
		||||
				Name:            nodeName,
 | 
			
		||||
				TargetNode:      mapRouteNameToNodeName(az.ipv6DualStackEnabled, nodeName),
 | 
			
		||||
				DestinationCIDR: cidr,
 | 
			
		||||
			})
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// ensure the route table is tagged as configured
 | 
			
		||||
	tags, changed := az.ensureRouteTableTagged(&routeTable)
 | 
			
		||||
	if changed {
 | 
			
		||||
		klog.V(2).Infof("ListRoutes: updating tags on route table %s", pointer.StringDeref(routeTable.Name, ""))
 | 
			
		||||
		op, err := az.routeUpdater.addUpdateRouteTableTagsOperation(routeTableOperationUpdateTags, tags)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.Errorf("ListRoutes: failed to add route table operation with error: %v", err)
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// Wait for operation complete.
 | 
			
		||||
		err = op.wait()
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.Errorf("ListRoutes: failed to update route table tags with error: %v", err)
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return routes, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Injectable for testing
 | 
			
		||||
func processRoutes(ipv6DualStackEnabled bool, routeTable network.RouteTable, exists bool, err error) ([]*cloudprovider.Route, error) {
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	if !exists {
 | 
			
		||||
		return []*cloudprovider.Route{}, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var kubeRoutes []*cloudprovider.Route
 | 
			
		||||
	if routeTable.RouteTablePropertiesFormat != nil && routeTable.Routes != nil {
 | 
			
		||||
		kubeRoutes = make([]*cloudprovider.Route, len(*routeTable.Routes))
 | 
			
		||||
		for i, route := range *routeTable.Routes {
 | 
			
		||||
			instance := mapRouteNameToNodeName(ipv6DualStackEnabled, *route.Name)
 | 
			
		||||
			cidr := *route.AddressPrefix
 | 
			
		||||
			klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr)
 | 
			
		||||
 | 
			
		||||
			kubeRoutes[i] = &cloudprovider.Route{
 | 
			
		||||
				Name:            *route.Name,
 | 
			
		||||
				TargetNode:      instance,
 | 
			
		||||
				DestinationCIDR: cidr,
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(10).Info("ListRoutes: FINISH")
 | 
			
		||||
	return kubeRoutes, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) createRouteTable() error {
 | 
			
		||||
	routeTable := network.RouteTable{
 | 
			
		||||
		Name:                       pointer.String(az.RouteTableName),
 | 
			
		||||
		Location:                   pointer.String(az.Location),
 | 
			
		||||
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName)
 | 
			
		||||
	err := az.CreateOrUpdateRouteTable(routeTable)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Invalidate the cache right after updating
 | 
			
		||||
	az.rtCache.Delete(az.RouteTableName)
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateRoute creates the described managed route
// route.Name will be ignored, although the cloud-provider may use nameHint
// to create a more user-meaningful name.
//
// The route is not written directly: it is queued on az.routeUpdater (which
// batches route-table writes) and this call blocks until that batched
// operation completes. Operation latency/success is recorded via the metric
// context regardless of outcome.
func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, nameHint string, kubeRoute *cloudprovider.Route) error {
	mc := metrics.NewMetricContext("routes", "create_route", az.ResourceGroup, az.SubscriptionID, "")
	isOperationSucceeded := false
	defer func() {
		mc.ObserveOperationWithResult(isOperationSucceeded)
	}()

	// Returns early for unmanaged nodes, because the Azure cloud provider
	// cannot fetch information for them; their CIDR is only remembered in the
	// local routeCIDRs map so the node controller won't retry route creation.
	var targetIP string
	nodeName := string(kubeRoute.TargetNode)
	unmanaged, err := az.IsNodeUnmanaged(nodeName)
	if err != nil {
		return err
	}
	if unmanaged {
		if az.ipv6DualStackEnabled {
			//TODO (khenidak) add support for unmanaged nodes when the feature reaches beta
			return fmt.Errorf("unmanaged nodes are not supported in dual stack mode")
		}
		klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
		az.routeCIDRsLock.Lock()
		defer az.routeCIDRsLock.Unlock()
		az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR
		return nil
	}

	CIDRv6 := utilnet.IsIPv6CIDRString(string(kubeRoute.DestinationCIDR))
	// if single stack IPv4 then get the IP for the primary ip config
	// single stack IPv6 is supported on dual stack host. So the IPv6 IP is secondary IP for both single stack IPv6 and dual stack
	// Get all private IPs for the machine and find the first one that matches the IPv6 family
	if !az.ipv6DualStackEnabled && !CIDRv6 {
		targetIP, _, err = az.getIPForMachine(kubeRoute.TargetNode)
		if err != nil {
			return err
		}
	} else {
		// for dual stack and single stack IPv6 we need to select
		// a private ip that matches family of the cidr
		klog.V(4).Infof("CreateRoute: create route instance=%q cidr=%q is in dual stack mode", kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
		nodePrivateIPs, err := az.getPrivateIPsForMachine(kubeRoute.TargetNode)
		if nil != err {
			klog.V(3).Infof("CreateRoute: create route: failed(GetPrivateIPsByNodeName) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err)
			return err
		}

		targetIP, err = findFirstIPByFamily(nodePrivateIPs, CIDRv6)
		if nil != err {
			klog.V(3).Infof("CreateRoute: create route: failed(findFirstIpByFamily) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err)
			return err
		}
	}
	// The route name encodes the node name (and in dual-stack mode the CIDR);
	// see mapNodeNameToRouteName/mapRouteNameToNodeName.
	routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR))
	route := network.Route{
		Name: pointer.String(routeName),
		RoutePropertiesFormat: &network.RoutePropertiesFormat{
			AddressPrefix:    pointer.String(kubeRoute.DestinationCIDR),
			NextHopType:      network.RouteNextHopTypeVirtualAppliance,
			NextHopIPAddress: pointer.String(targetIP),
		},
	}

	klog.V(2).Infof("CreateRoute: creating route for clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	op, err := az.routeUpdater.addRouteOperation(routeOperationAdd, route)
	if err != nil {
		klog.Errorf("CreateRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
		return err
	}

	// Wait for operation complete.
	err = op.wait()
	if err != nil {
		klog.Errorf("CreateRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
		return err
	}

	klog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	isOperationSucceeded = true

	return nil
}
 | 
			
		||||
 | 
			
		||||
// DeleteRoute deletes the specified managed route
// Route should be as returned by ListRoutes
//
// Like CreateRoute, the deletion is queued on az.routeUpdater and this call
// blocks until the batched operation completes. In dual-stack mode a second
// deletion is issued for any legacy route named with the bare node name
// (pre-dual-stack naming), so stale IPv4 routes are cleaned up too.
func (az *Cloud) DeleteRoute(ctx context.Context, clusterName string, kubeRoute *cloudprovider.Route) error {
	mc := metrics.NewMetricContext("routes", "delete_route", az.ResourceGroup, az.SubscriptionID, "")
	isOperationSucceeded := false
	defer func() {
		mc.ObserveOperationWithResult(isOperationSucceeded)
	}()

	// Returns early for unmanaged nodes, because the Azure cloud provider
	// cannot fetch information for them; only the local routeCIDRs bookkeeping
	// entry is removed.
	nodeName := string(kubeRoute.TargetNode)
	unmanaged, err := az.IsNodeUnmanaged(nodeName)
	if err != nil {
		return err
	}
	if unmanaged {
		klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode)
		az.routeCIDRsLock.Lock()
		defer az.routeCIDRsLock.Unlock()
		delete(az.routeCIDRs, nodeName)
		return nil
	}

	routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, string(kubeRoute.DestinationCIDR))
	klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeName)
	route := network.Route{
		Name:                  pointer.String(routeName),
		RoutePropertiesFormat: &network.RoutePropertiesFormat{},
	}
	op, err := az.routeUpdater.addRouteOperation(routeOperationDelete, route)
	if err != nil {
		klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
		return err
	}

	// Wait for operation complete.
	err = op.wait()
	if err != nil {
		klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
		return err
	}

	// Remove outdated ipv4 routes as well
	if az.ipv6DualStackEnabled {
		// The dual-stack route name is "<node><separator><cidr>"; stripping the
		// suffix yields the legacy single-stack name for the same node.
		routeNameWithoutIPV6Suffix := strings.Split(routeName, routeNameSeparator)[0]
		klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeNameWithoutIPV6Suffix)
		route := network.Route{
			Name:                  pointer.String(routeNameWithoutIPV6Suffix),
			RoutePropertiesFormat: &network.RoutePropertiesFormat{},
		}
		op, err := az.routeUpdater.addRouteOperation(routeOperationDelete, route)
		if err != nil {
			klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
			return err
		}

		// Wait for operation complete.
		err = op.wait()
		if err != nil {
			klog.Errorf("DeleteRoute failed for node %q with error: %v", kubeRoute.TargetNode, err)
			return err
		}
	}

	klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR)
	isOperationSucceeded = true

	return nil
}
 | 
			
		||||
 | 
			
		||||
// This must be kept in sync with mapRouteNameToNodeName.
 | 
			
		||||
// These two functions enable stashing the instance name in the route
 | 
			
		||||
// and then retrieving it later when listing. This is needed because
 | 
			
		||||
// Azure does not let you put tags/descriptions on the Route itself.
 | 
			
		||||
func mapNodeNameToRouteName(ipv6DualStackEnabled bool, nodeName types.NodeName, cidr string) string {
 | 
			
		||||
	if !ipv6DualStackEnabled {
 | 
			
		||||
		return fmt.Sprintf("%s", nodeName)
 | 
			
		||||
	}
 | 
			
		||||
	return fmt.Sprintf(routeNameFmt, nodeName, cidrtoRfc1035(cidr))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Used with mapNodeNameToRouteName. See comment on mapNodeNameToRouteName.
 | 
			
		||||
func mapRouteNameToNodeName(ipv6DualStackEnabled bool, routeName string) types.NodeName {
 | 
			
		||||
	if !ipv6DualStackEnabled {
 | 
			
		||||
		return types.NodeName(routeName)
 | 
			
		||||
	}
 | 
			
		||||
	parts := strings.Split(routeName, routeNameSeparator)
 | 
			
		||||
	nodeName := parts[0]
 | 
			
		||||
	return types.NodeName(nodeName)
 | 
			
		||||
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// given a list of ips, return the first one
 | 
			
		||||
// that matches the family requested
 | 
			
		||||
// error if no match, or failure to parse
 | 
			
		||||
// any of the ips
 | 
			
		||||
func findFirstIPByFamily(ips []string, v6 bool) (string, error) {
 | 
			
		||||
	for _, ip := range ips {
 | 
			
		||||
		bIPv6 := utilnet.IsIPv6String(ip)
 | 
			
		||||
		if v6 == bIPv6 {
 | 
			
		||||
			return ip, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return "", fmt.Errorf("no match found matching the ipfamily requested")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// cidrtoRfc1035 strips the characters ':', '.' and '/' from a CIDR string so
// the result can be embedded in an RFC 1035-compatible resource name.
func cidrtoRfc1035(cidr string) string {
	return strings.NewReplacer(":", "", ".", "", "/", "").Replace(cidr)
}
 | 
			
		||||
 | 
			
		||||
// ensureRouteTableTagged ensures the route table is tagged as configured
 | 
			
		||||
func (az *Cloud) ensureRouteTableTagged(rt *network.RouteTable) (map[string]*string, bool) {
 | 
			
		||||
	if az.Tags == "" {
 | 
			
		||||
		return nil, false
 | 
			
		||||
	}
 | 
			
		||||
	tags := parseTags(az.Tags)
 | 
			
		||||
	if rt.Tags == nil {
 | 
			
		||||
		rt.Tags = make(map[string]*string)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	tags, changed := reconcileTags(rt.Tags, tags)
 | 
			
		||||
	rt.Tags = tags
 | 
			
		||||
 | 
			
		||||
	return rt.Tags, changed
 | 
			
		||||
}
 | 
			
		||||
@@ -1,813 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/sets"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/wait"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/routetableclient/mockroutetableclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/mockvmsets"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestDeleteRoute verifies that DeleteRoute removes a managed node's route
// from the route table (Get followed by CreateOrUpdate with the route gone)
// and that, for unmanaged nodes, it only drops the in-memory routeCIDRs entry
// without touching the Azure API.
func TestDeleteRoute(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	routeTableClient := mockroutetableclient.NewMockInterface(ctrl)

	cloud := &Cloud{
		RouteTablesClient: routeTableClient,
		Config: Config{
			RouteTableResourceGroup: "foo",
			RouteTableName:          "bar",
			Location:                "location",
		},
		unmanagedNodes:     sets.NewString(),
		nodeInformerSynced: func() bool { return true },
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache
	// Short flush interval so the batched route updater completes quickly in tests.
	cloud.routeUpdater = newDelayedRouteUpdater(cloud, 100*time.Millisecond)
	go cloud.routeUpdater.run()
	route := cloudprovider.Route{
		TargetNode:      "node",
		DestinationCIDR: "1.2.3.4/24",
	}
	routeName := mapNodeNameToRouteName(false, route.TargetNode, route.DestinationCIDR)
	// Table state returned by Get: a single route for the node under test.
	routeTables := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
			Routes: &[]network.Route{
				{
					Name: &routeName,
				},
			},
		},
	}
	// Table expected in the CreateOrUpdate call: the route has been removed.
	routeTablesAfterDeletion := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
			Routes: &[]network.Route{},
		},
	}
	routeTableClient.EXPECT().Get(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, "").Return(routeTables, nil)
	routeTableClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, routeTablesAfterDeletion, "").Return(nil)
	err := cloud.DeleteRoute(context.TODO(), "cluster", &route)
	if err != nil {
		t.Errorf("unexpected error deleting route: %v", err)
		t.FailNow()
	}

	// test delete route for unmanaged nodes.
	nodeName := "node1"
	nodeCIDR := "4.3.2.1/24"
	cloud.unmanagedNodes.Insert(nodeName)
	cloud.routeCIDRs = map[string]string{
		nodeName: nodeCIDR,
	}
	route1 := cloudprovider.Route{
		TargetNode:      mapRouteNameToNodeName(false, nodeName),
		DestinationCIDR: nodeCIDR,
	}
	err = cloud.DeleteRoute(context.TODO(), "cluster", &route1)
	if err != nil {
		t.Errorf("unexpected error deleting route: %v", err)
		t.FailNow()
	}
	// The unmanaged node's CIDR must have been removed from the local cache.
	cidr, found := cloud.routeCIDRs[nodeName]
	if found {
		t.Errorf("unexpected CIDR item (%q) for %s", cidr, nodeName)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestCreateRoute is a table-driven test covering CreateRoute's paths:
// successful creation, error propagation from Get/CreateOrUpdate (including
// the implicit route-table creation on 404 and the follow-up Get), failure to
// resolve the node IP, unmanaged-node bookkeeping, the dual-stack route-name
// format, and the not-synced node informer error.
func TestCreateRoute(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	routeTableClient := mockroutetableclient.NewMockInterface(ctrl)
	mockVMSet := mockvmsets.NewMockVMSet(ctrl)

	cloud := &Cloud{
		RouteTablesClient: routeTableClient,
		VMSet:             mockVMSet,
		Config: Config{
			RouteTableResourceGroup: "foo",
			RouteTableName:          "bar",
			Location:                "location",
		},
		unmanagedNodes:     sets.NewString(),
		nodeInformerSynced: func() bool { return true },
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache
	// Short flush interval so the batched route updater completes quickly in tests.
	cloud.routeUpdater = newDelayedRouteUpdater(cloud, 100*time.Millisecond)
	go cloud.routeUpdater.run()

	route := cloudprovider.Route{TargetNode: "node", DestinationCIDR: "1.2.3.4/24"}
	nodePrivateIP := "2.4.6.8"
	// The route CreateRoute is expected to produce for the single-stack case.
	networkRoute := &[]network.Route{
		{
			Name: pointer.String("node"),
			RoutePropertiesFormat: &network.RoutePropertiesFormat{
				AddressPrefix:    pointer.String("1.2.3.4/24"),
				NextHopIPAddress: &nodePrivateIP,
				NextHopType:      network.RouteNextHopTypeVirtualAppliance,
			},
		},
	}

	testCases := []struct {
		name                  string
		routeTableName        string
		initialRoute          *[]network.Route
		updatedRoute          *[]network.Route
		hasUnmanagedNodes     bool
		nodeInformerNotSynced bool
		ipv6DualStackEnabled  bool
		routeTableNotExist    bool
		unmanagedNodeName     string
		routeCIDRs            map[string]string
		expectedRouteCIDRs    map[string]string

		getIPError        error
		getErr            *retry.Error
		secondGetErr      *retry.Error
		createOrUpdateErr *retry.Error
		expectedErrMsg    error
	}{
		{
			name:           "CreateRoute should create route if route doesn't exist",
			routeTableName: "rt1",
			updatedRoute:   networkRoute,
		},
		{
			name:           "CreateRoute should report error if error occurs when invoke CreateOrUpdateRouteTable",
			routeTableName: "rt2",
			updatedRoute:   networkRoute,
			createOrUpdateErr: &retry.Error{
				HTTPStatusCode: http.StatusInternalServerError,
				RawError:       fmt.Errorf("CreateOrUpdate error"),
			},
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", fmt.Errorf("CreateOrUpdate error")),
		},
		{
			name:           "CreateRoute should do nothing if route already exists",
			routeTableName: "rt3",
			initialRoute:   networkRoute,
			updatedRoute:   networkRoute,
		},
		{
			name:           "CreateRoute should report error if error occurs when invoke createRouteTable",
			routeTableName: "rt4",
			// A 404 on Get triggers the implicit createRouteTable path.
			getErr: &retry.Error{
				HTTPStatusCode: http.StatusNotFound,
				RawError:       cloudprovider.InstanceNotFound,
			},
			createOrUpdateErr: &retry.Error{
				HTTPStatusCode: http.StatusInternalServerError,
				RawError:       fmt.Errorf("CreateOrUpdate error"),
			},
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", fmt.Errorf("CreateOrUpdate error")),
		},
		{
			name:           "CreateRoute should report error if error occurs when invoke getRouteTable for the second time",
			routeTableName: "rt5",
			getErr: &retry.Error{
				HTTPStatusCode: http.StatusNotFound,
				RawError:       cloudprovider.InstanceNotFound,
			},
			secondGetErr: &retry.Error{
				HTTPStatusCode: http.StatusInternalServerError,
				RawError:       fmt.Errorf("Get error"),
			},
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", fmt.Errorf("Get error")),
		},
		{
			name:           "CreateRoute should report error if error occurs when invoke routeTableClient.Get",
			routeTableName: "rt6",
			getErr: &retry.Error{
				HTTPStatusCode: http.StatusInternalServerError,
				RawError:       fmt.Errorf("Get error"),
			},
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", fmt.Errorf("Get error")),
		},
		{
			name:           "CreateRoute should report error if error occurs when invoke GetIPByNodeName",
			routeTableName: "rt7",
			getIPError:     fmt.Errorf("getIP error"),
			// The IP lookup is retried internally until the wait times out.
			expectedErrMsg: wait.ErrWaitTimeout,
		},
		{
			name:               "CreateRoute should add route to cloud.RouteCIDRs if node is unmanaged",
			routeTableName:     "rt8",
			hasUnmanagedNodes:  true,
			unmanagedNodeName:  "node",
			routeCIDRs:         map[string]string{},
			expectedRouteCIDRs: map[string]string{"node": "1.2.3.4/24"},
		},
		{
			name:                 "CreateRoute should report error if node is unmanaged and cloud.ipv6DualStackEnabled is true",
			hasUnmanagedNodes:    true,
			ipv6DualStackEnabled: true,
			unmanagedNodeName:    "node",
			expectedErrMsg:       fmt.Errorf("unmanaged nodes are not supported in dual stack mode"),
		},
		{
			name:           "CreateRoute should create route if cloud.ipv6DualStackEnabled is true and route doesn't exist",
			routeTableName: "rt9",
			// Dual-stack route names embed the stripped CIDR: "node____123424".
			updatedRoute: &[]network.Route{
				{
					Name: pointer.String("node____123424"),
					RoutePropertiesFormat: &network.RoutePropertiesFormat{
						AddressPrefix:    pointer.String("1.2.3.4/24"),
						NextHopIPAddress: &nodePrivateIP,
						NextHopType:      network.RouteNextHopTypeVirtualAppliance,
					},
				},
			},
			ipv6DualStackEnabled: true,
		},
		{
			name:                  "CreateRoute should report error if node informer is not synced",
			nodeInformerNotSynced: true,
			expectedErrMsg:        fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes"),
		},
	}

	for _, test := range testCases {
		// Table returned by the mocked Get; table expected in CreateOrUpdate.
		initialTable := network.RouteTable{
			Name:     pointer.String(test.routeTableName),
			Location: &cloud.Location,
			RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
				Routes: test.initialRoute,
			},
		}
		updatedTable := network.RouteTable{
			Name:     pointer.String(test.routeTableName),
			Location: &cloud.Location,
			RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
				Routes: test.updatedRoute,
			},
		}

		cloud.RouteTableName = test.routeTableName
		cloud.ipv6DualStackEnabled = test.ipv6DualStackEnabled
		if test.hasUnmanagedNodes {
			cloud.unmanagedNodes.Insert(test.unmanagedNodeName)
			cloud.routeCIDRs = test.routeCIDRs
		} else {
			cloud.unmanagedNodes = sets.NewString()
			cloud.routeCIDRs = nil
		}
		if test.nodeInformerNotSynced {
			cloud.nodeInformerSynced = func() bool { return false }
		} else {
			cloud.nodeInformerSynced = func() bool { return true }
		}

		// MaxTimes(1) everywhere: not every test case reaches every call.
		mockVMSet.EXPECT().GetIPByNodeName(gomock.Any()).Return(nodePrivateIP, "", test.getIPError).MaxTimes(1)
		mockVMSet.EXPECT().GetPrivateIPsByNodeName("node").Return([]string{nodePrivateIP, "10.10.10.10"}, nil).MaxTimes(1)
		routeTableClient.EXPECT().Get(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, "").Return(initialTable, test.getErr).MaxTimes(1)
		routeTableClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, updatedTable, "").Return(test.createOrUpdateErr).MaxTimes(1)

		//Here is the second invocation when route table doesn't exist
		routeTableClient.EXPECT().Get(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, "").Return(initialTable, test.secondGetErr).MaxTimes(1)

		err := cloud.CreateRoute(context.TODO(), "cluster", "unused", &route)
		assert.Equal(t, cloud.routeCIDRs, test.expectedRouteCIDRs, test.name)
		assert.Equal(t, test.expectedErrMsg, err, test.name)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestCreateRouteTable verifies that createRouteTable issues a single
// CreateOrUpdate for an empty route table using the configured name,
// resource group, and location.
func TestCreateRouteTable(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	routeTableClient := mockroutetableclient.NewMockInterface(ctrl)

	cloud := &Cloud{
		RouteTablesClient: routeTableClient,
		Config: Config{
			RouteTableResourceGroup: "foo",
			RouteTableName:          "bar",
			Location:                "location",
		},
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache

	// The table sent to Azure: configured name/location, empty properties.
	expectedTable := network.RouteTable{
		Name:                       &cloud.RouteTableName,
		Location:                   &cloud.Location,
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
	}
	routeTableClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, expectedTable, "").Return(nil)
	err := cloud.createRouteTable()
	if err != nil {
		t.Errorf("unexpected error in creating route table: %v", err)
		t.FailNow()
	}
}
 | 
			
		||||
 | 
			
		||||
// TestProcessRoutes verifies that processRoutes translates an ARM RouteTable
// into cloud-provider routes. Cases cover: upstream error passthrough, a
// table that does not exist, nil properties, an empty route list, and tables
// with one or several routes.
func TestProcessRoutes(t *testing.T) {
	tests := []struct {
		rt            network.RouteTable      // route table returned by ARM
		exists        bool                    // whether the table exists
		err           error                   // error from the Get call, passed through
		expectErr     bool                    // whether processRoutes should fail
		expectedError string                  // exact error text when expectErr is true
		expectedRoute []cloudprovider.Route   // routes expected on success
		name          string                  // case label used in failure messages
	}{
		{
			// An upstream error must be surfaced unchanged.
			err:           fmt.Errorf("test error"),
			expectErr:     true,
			expectedError: "test error",
		},
		{
			// Missing table: no routes, no error.
			exists: false,
			name:   "doesn't exist",
		},
		{
			// Table with nil RouteTablePropertiesFormat: no routes.
			rt:     network.RouteTable{},
			exists: true,
			name:   "nil routes",
		},
		{
			// Properties present but Routes is nil: still no routes.
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{},
			},
			exists: true,
			name:   "no routes",
		},
		{
			// Single route: name and address prefix are mapped through.
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: pointer.String("name"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: pointer.String("1.2.3.4/16"),
							},
						},
					},
				},
			},
			exists: true,
			expectedRoute: []cloudprovider.Route{
				{
					Name:            "name",
					TargetNode:      mapRouteNameToNodeName(false, "name"),
					DestinationCIDR: "1.2.3.4/16",
				},
			},
			name: "one route",
		},
		{
			// Multiple routes: all are mapped, preserving order.
			rt: network.RouteTable{
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: pointer.String("name"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: pointer.String("1.2.3.4/16"),
							},
						},
						{
							Name: pointer.String("name2"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: pointer.String("5.6.7.8/16"),
							},
						},
					},
				},
			},
			exists: true,
			expectedRoute: []cloudprovider.Route{
				{
					Name:            "name",
					TargetNode:      mapRouteNameToNodeName(false, "name"),
					DestinationCIDR: "1.2.3.4/16",
				},
				{
					Name:            "name2",
					TargetNode:      mapRouteNameToNodeName(false, "name2"),
					DestinationCIDR: "5.6.7.8/16",
				},
			},
			name: "more routes",
		},
	}
	for _, test := range tests {
		// ipv6DualStackEnabled is false for all cases here.
		routes, err := processRoutes(false, test.rt, test.exists, test.err)
		if test.expectErr {
			if err == nil {
				t.Errorf("%s: unexpected non-error", test.name)
				continue
			}
			if err.Error() != test.expectedError {
				t.Errorf("%s: Expected error: %v, saw error: %v", test.name, test.expectedError, err.Error())
				continue
			}
		}
		if !test.expectErr && err != nil {
			t.Errorf("%s; unexpected error: %v", test.name, err)
			continue
		}
		// Compare lengths first so an element-wise mismatch below cannot
		// index out of range.
		if len(routes) != len(test.expectedRoute) {
			t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, routes, test.expectedRoute)
			continue
		}
		for ix := range test.expectedRoute {
			if !reflect.DeepEqual(test.expectedRoute[ix], *routes[ix]) {
				t.Errorf("%s: Unexpected difference: %#v vs %#v", test.name, test.expectedRoute[ix], *routes[ix])
			}
		}
	}
}
 | 
			
		||||
 | 
			
		||||
func TestFindFirstIPByFamily(t *testing.T) {
 | 
			
		||||
	firstIPv4 := "10.0.0.1"
 | 
			
		||||
	firstIPv6 := "2001:1234:5678:9abc::9"
 | 
			
		||||
	testIPs := []string{
 | 
			
		||||
		firstIPv4,
 | 
			
		||||
		"11.0.0.1",
 | 
			
		||||
		firstIPv6,
 | 
			
		||||
		"fda4:6dee:effc:62a0:0:0:0:0",
 | 
			
		||||
	}
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		ipv6           bool
 | 
			
		||||
		ips            []string
 | 
			
		||||
		expectedIP     string
 | 
			
		||||
		expectedErrMsg error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			ipv6:       true,
 | 
			
		||||
			ips:        testIPs,
 | 
			
		||||
			expectedIP: firstIPv6,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			ipv6:       false,
 | 
			
		||||
			ips:        testIPs,
 | 
			
		||||
			expectedIP: firstIPv4,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			ipv6:           true,
 | 
			
		||||
			ips:            []string{"10.0.0.1"},
 | 
			
		||||
			expectedErrMsg: fmt.Errorf("no match found matching the ipfamily requested"),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	for _, test := range testCases {
 | 
			
		||||
		ip, err := findFirstIPByFamily(test.ips, test.ipv6)
 | 
			
		||||
		assert.Equal(t, test.expectedErrMsg, err)
 | 
			
		||||
		assert.Equal(t, test.expectedIP, ip)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestRouteNameFuncs(t *testing.T) {
 | 
			
		||||
	v4CIDR := "10.0.0.1/16"
 | 
			
		||||
	v6CIDR := "fd3e:5f02:6ec0:30ba::/64"
 | 
			
		||||
	nodeName := "thisNode"
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		ipv6DualStackEnabled bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			ipv6DualStackEnabled: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			ipv6DualStackEnabled: false,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	for _, test := range testCases {
 | 
			
		||||
		routeName := mapNodeNameToRouteName(test.ipv6DualStackEnabled, types.NodeName(nodeName), v4CIDR)
 | 
			
		||||
		outNodeName := mapRouteNameToNodeName(test.ipv6DualStackEnabled, routeName)
 | 
			
		||||
		assert.Equal(t, string(outNodeName), nodeName)
 | 
			
		||||
 | 
			
		||||
		routeName = mapNodeNameToRouteName(test.ipv6DualStackEnabled, types.NodeName(nodeName), v6CIDR)
 | 
			
		||||
		outNodeName = mapRouteNameToNodeName(test.ipv6DualStackEnabled, routeName)
 | 
			
		||||
		assert.Equal(t, string(outNodeName), nodeName)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestListRoutes verifies ListRoutes against a mocked route table client:
// a normal listing, merging of CIDRs tracked for unmanaged nodes, a 404 from
// ARM (treated as "no routes"), a hard Get error, and an unsynced node
// informer.
func TestListRoutes(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	routeTableClient := mockroutetableclient.NewMockInterface(ctrl)
	mockVMSet := mockvmsets.NewMockVMSet(ctrl)

	cloud := &Cloud{
		RouteTablesClient: routeTableClient,
		VMSet:             mockVMSet,
		Config: Config{
			RouteTableResourceGroup: "foo",
			RouteTableName:          "bar",
			Location:                "location",
		},
		unmanagedNodes:     sets.NewString(),
		nodeInformerSynced: func() bool { return true },
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache
	// Background updater consumed by the route APIs.
	cloud.routeUpdater = newDelayedRouteUpdater(cloud, 100*time.Millisecond)
	go cloud.routeUpdater.run()

	testCases := []struct {
		name                  string
		routeTableName        string               // table name set on the cloud for this case
		routeTable            network.RouteTable   // table returned by the mocked Get
		hasUnmanagedNodes     bool                 // seed unmanaged-node bookkeeping
		nodeInformerNotSynced bool                 // simulate an unsynced informer
		unmanagedNodeName     string
		routeCIDRs            map[string]string    // CIDRs tracked for unmanaged nodes
		expectedRoutes        []*cloudprovider.Route
		getErr                *retry.Error         // error returned by the mocked Get
		expectedErrMsg        error
	}{
		{
			name:           "ListRoutes should return correct routes",
			routeTableName: "rt1",
			routeTable: network.RouteTable{
				Name:     pointer.String("rt1"),
				Location: &cloud.Location,
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: pointer.String("node"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: pointer.String("1.2.3.4/24"),
							},
						},
					},
				},
			},
			expectedRoutes: []*cloudprovider.Route{
				{
					Name:            "node",
					TargetNode:      mapRouteNameToNodeName(false, "node"),
					DestinationCIDR: "1.2.3.4/24",
				},
			},
		},
		{
			// Unmanaged-node CIDRs are appended to the routes read from ARM.
			name:              "ListRoutes should return correct routes if there's unmanaged nodes",
			routeTableName:    "rt2",
			hasUnmanagedNodes: true,
			unmanagedNodeName: "unmanaged-node",
			routeCIDRs:        map[string]string{"unmanaged-node": "2.2.3.4/24"},
			routeTable: network.RouteTable{
				Name:     pointer.String("rt2"),
				Location: &cloud.Location,
				RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
					Routes: &[]network.Route{
						{
							Name: pointer.String("node"),
							RoutePropertiesFormat: &network.RoutePropertiesFormat{
								AddressPrefix: pointer.String("1.2.3.4/24"),
							},
						},
					},
				},
			},
			expectedRoutes: []*cloudprovider.Route{
				{
					Name:            "node",
					TargetNode:      mapRouteNameToNodeName(false, "node"),
					DestinationCIDR: "1.2.3.4/24",
				},
				{
					Name:            "unmanaged-node",
					TargetNode:      mapRouteNameToNodeName(false, "unmanaged-node"),
					DestinationCIDR: "2.2.3.4/24",
				},
			},
		},
		{
			// A 404 from ARM is not an error: an empty route list is returned.
			name:           "ListRoutes should return nil if routeTable don't exist",
			routeTableName: "rt3",
			routeTable:     network.RouteTable{},
			getErr: &retry.Error{
				HTTPStatusCode: http.StatusNotFound,
				RawError:       cloudprovider.InstanceNotFound,
			},
			expectedRoutes: []*cloudprovider.Route{},
		},
		{
			// Any other Get error is propagated to the caller.
			name:           "ListRoutes should report error if error occurs when invoke routeTableClient.Get",
			routeTableName: "rt4",
			routeTable:     network.RouteTable{},
			getErr: &retry.Error{
				HTTPStatusCode: http.StatusInternalServerError,
				RawError:       fmt.Errorf("Get error"),
			},
			expectedErrMsg: fmt.Errorf("Retriable: false, RetryAfter: 0s, HTTPStatusCode: 500, RawError: %w", fmt.Errorf("Get error")),
		},
		{
			// Without a synced informer the unmanaged-node set cannot be read.
			name:                  "ListRoutes should report error if node informer is not synced",
			routeTableName:        "rt5",
			nodeInformerNotSynced: true,
			routeTable:            network.RouteTable{},
			expectedErrMsg:        fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes"),
		},
	}

	for _, test := range testCases {
		// Seed or clear the unmanaged-node bookkeeping for this case.
		if test.hasUnmanagedNodes {
			cloud.unmanagedNodes.Insert(test.unmanagedNodeName)
			cloud.routeCIDRs = test.routeCIDRs
		} else {
			cloud.unmanagedNodes = sets.NewString()
			cloud.routeCIDRs = nil
		}

		if test.nodeInformerNotSynced {
			cloud.nodeInformerSynced = func() bool { return false }
		} else {
			cloud.nodeInformerSynced = func() bool { return true }
		}

		cloud.RouteTableName = test.routeTableName
		routeTableClient.EXPECT().Get(gomock.Any(), cloud.RouteTableResourceGroup, test.routeTableName, "").Return(test.routeTable, test.getErr)

		routes, err := cloud.ListRoutes(context.TODO(), "cluster")
		assert.Equal(t, test.expectedRoutes, routes, test.name)
		assert.Equal(t, test.expectedErrMsg, err, test.name)
	}
}
 | 
			
		||||
 | 
			
		||||
// TestCleanupOutdatedRoutes verifies that cleanupOutdatedRoutes removes
// stale route entries for managed nodes — single-stack-named routes when
// dual-stack is enabled and dual-stack-named routes ("____"-suffixed) when it
// is disabled — while leaving routes of unmanaged (unknown) nodes untouched.
func TestCleanupOutdatedRoutes(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	for _, testCase := range []struct {
		description                          string
		existingRoutes, expectedRoutes       []network.Route
		existingNodeNames                    sets.String // nodes currently managed by the cluster
		expectedChanged, enableIPV6DualStack bool
	}{
		{
			// Dual-stack on: the plain (non-"____") route for a managed node is outdated.
			description: "cleanupOutdatedRoutes should delete outdated non-dualstack routes when dualstack is enabled",
			existingRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000____xxx")},
				{Name: pointer.String("aks-node1-vmss000000")},
			},
			expectedRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000____xxx")},
			},
			existingNodeNames:   sets.NewString("aks-node1-vmss000000"),
			enableIPV6DualStack: true,
			expectedChanged:     true,
		},
		{
			// Dual-stack off: the "____"-suffixed route for a managed node is outdated.
			description: "cleanupOutdatedRoutes should delete outdated dualstack routes when dualstack is disabled",
			existingRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000____xxx")},
				{Name: pointer.String("aks-node1-vmss000000")},
			},
			expectedRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000")},
			},
			existingNodeNames: sets.NewString("aks-node1-vmss000000"),
			expectedChanged:   true,
		},
		{
			// Routes for nodes this cluster does not manage are never deleted.
			description: "cleanupOutdatedRoutes should not delete unmanaged routes when dualstack is enabled",
			existingRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000____xxx")},
				{Name: pointer.String("aks-node1-vmss000000")},
			},
			expectedRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000____xxx")},
				{Name: pointer.String("aks-node1-vmss000000")},
			},
			existingNodeNames:   sets.NewString("aks-node1-vmss000001"),
			enableIPV6DualStack: true,
		},
		{
			description: "cleanupOutdatedRoutes should not delete unmanaged routes when dualstack is disabled",
			existingRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000____xxx")},
				{Name: pointer.String("aks-node1-vmss000000")},
			},
			expectedRoutes: []network.Route{
				{Name: pointer.String("aks-node1-vmss000000____xxx")},
				{Name: pointer.String("aks-node1-vmss000000")},
			},
			existingNodeNames: sets.NewString("aks-node1-vmss000001"),
		},
	} {
		t.Run(testCase.description, func(t *testing.T) {
			cloud := &Cloud{
				ipv6DualStackEnabled: testCase.enableIPV6DualStack,
				nodeNames:            testCase.existingNodeNames,
			}

			d := &delayedRouteUpdater{
				az: cloud,
			}

			routes, changed := d.cleanupOutdatedRoutes(testCase.existingRoutes)
			assert.Equal(t, testCase.expectedChanged, changed)
			assert.Equal(t, testCase.expectedRoutes, routes)
		})
	}
}
 | 
			
		||||
 | 
			
		||||
// TestDeleteRouteDualStack verifies that deleting a route with dual-stack
// enabled removes both the dual-stack route name and its IPv4 counterpart,
// issuing two successive CreateOrUpdate calls (one per removed entry).
func TestDeleteRouteDualStack(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	routeTableClient := mockroutetableclient.NewMockInterface(ctrl)

	cloud := &Cloud{
		RouteTablesClient: routeTableClient,
		Config: Config{
			RouteTableResourceGroup: "foo",
			RouteTableName:          "bar",
			Location:                "location",
		},
		unmanagedNodes:       sets.NewString(),
		nodeInformerSynced:   func() bool { return true },
		ipv6DualStackEnabled: true,
	}
	cache, _ := cloud.newRouteTableCache()
	cloud.rtCache = cache
	cloud.routeUpdater = newDelayedRouteUpdater(cloud, 100*time.Millisecond)
	go cloud.routeUpdater.run()

	route := cloudprovider.Route{
		TargetNode:      "node",
		DestinationCIDR: "1.2.3.4/24",
	}
	// Both name flavors for the same node/CIDR must be cleaned up.
	routeName := mapNodeNameToRouteName(true, route.TargetNode, route.DestinationCIDR)
	routeNameIPV4 := mapNodeNameToRouteName(false, route.TargetNode, route.DestinationCIDR)
	// Initial table contains both route entries.
	routeTables := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
			Routes: &[]network.Route{
				{
					Name: &routeName,
				},
				{
					Name: &routeNameIPV4,
				},
			},
		},
	}
	// After the first deletion only the IPv4-named route remains.
	routeTablesAfterFirstDeletion := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
			Routes: &[]network.Route{
				{
					Name: &routeNameIPV4,
				},
			},
		},
	}
	// After the second deletion the table is empty.
	routeTablesAfterSecondDeletion := network.RouteTable{
		Name:     &cloud.RouteTableName,
		Location: &cloud.Location,
		RouteTablePropertiesFormat: &network.RouteTablePropertiesFormat{
			Routes: &[]network.Route{},
		},
	}
	routeTableClient.EXPECT().Get(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, "").Return(routeTables, nil).AnyTimes()
	routeTableClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, routeTablesAfterFirstDeletion, "").Return(nil)
	routeTableClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.RouteTableResourceGroup, cloud.RouteTableName, routeTablesAfterSecondDeletion, "").Return(nil)
	err := cloud.DeleteRoute(context.TODO(), "cluster", &route)
	if err != nil {
		t.Errorf("unexpected error deleting route: %v", err)
		t.FailNow()
	}
}
 | 
			
		||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							@@ -1,86 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/fileclient"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Defaults and account-name prefixes for storage accounts managed by this
// provider. The prefixes distinguish what a generated account is used for.
const (
	defaultStorageAccountType      = string(storage.StandardLRS) // SKU used when none is requested
	defaultStorageAccountKind      = storage.StorageV2           // account kind used when none is requested
	fileShareAccountNamePrefix     = "f"                         // accounts backing file shares
	sharedDiskAccountNamePrefix    = "ds"                        // accounts backing shared data disks
	dedicatedDiskAccountNamePrefix = "dd"                        // accounts backing dedicated data disks
)
 | 
			
		||||
 | 
			
		||||
// CreateFileShare creates a file share, using a matching storage account type, account kind, etc.
 | 
			
		||||
// storage account will be created if specified account is not found
 | 
			
		||||
func (az *Cloud) CreateFileShare(accountOptions *AccountOptions, shareOptions *fileclient.ShareOptions) (string, string, error) {
 | 
			
		||||
	if accountOptions == nil {
 | 
			
		||||
		return "", "", fmt.Errorf("account options is nil")
 | 
			
		||||
	}
 | 
			
		||||
	if shareOptions == nil {
 | 
			
		||||
		return "", "", fmt.Errorf("share options is nil")
 | 
			
		||||
	}
 | 
			
		||||
	if accountOptions.ResourceGroup == "" {
 | 
			
		||||
		accountOptions.ResourceGroup = az.resourceGroup
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	accountOptions.EnableHTTPSTrafficOnly = true
 | 
			
		||||
	if shareOptions.Protocol == storage.NFS {
 | 
			
		||||
		accountOptions.EnableHTTPSTrafficOnly = false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	accountName, accountKey, err := az.EnsureStorageAccount(accountOptions, fileShareAccountNamePrefix)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountOptions.Name, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if err := az.createFileShare(accountOptions.ResourceGroup, accountName, shareOptions); err != nil {
 | 
			
		||||
		return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareOptions.Name, accountName, err)
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(4).Infof("created share %s in account %s", shareOptions.Name, accountOptions.Name)
 | 
			
		||||
	return accountName, accountKey, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeleteFileShare deletes a file share using storage account name and key
 | 
			
		||||
func (az *Cloud) DeleteFileShare(resourceGroup, accountName, shareName string) error {
 | 
			
		||||
	if err := az.deleteFileShare(resourceGroup, accountName, shareName); err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	klog.V(4).Infof("share %s deleted", shareName)
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ResizeFileShare resizes the named file share in the given resource group
// and storage account to sizeGiB. It delegates to the unexported
// implementation.
func (az *Cloud) ResizeFileShare(resourceGroup, accountName, name string, sizeGiB int) error {
	return az.resizeFileShare(resourceGroup, accountName, name, sizeGiB)
}
 | 
			
		||||
 | 
			
		||||
// GetFileShare gets the named file share from the given resource group and
// storage account. It delegates to the unexported implementation.
func (az *Cloud) GetFileShare(resourceGroupName, accountName, name string) (storage.FileShare, error) {
	return az.getFileShare(resourceGroupName, accountName, name)
}
 | 
			
		||||
@@ -1,315 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/fileclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/fileclient/mockfileclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient/mockstorageaccountclient"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestCreateFileShare exercises Cloud.CreateFileShare with mocked file and
// storage-account clients, covering: a named account that cannot be keyed,
// account auto-selection by SKU/kind/location, a createFileShare failure,
// and accounts rejected for mismatched name or location.
func TestCreateFileShare(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cloud := &Cloud{controllerCommon: &controllerCommon{resourceGroup: "rg"}}
	name := "baz"
	sku := "sku"
	kind := "StorageV2"
	location := "centralus"
	value := "foo key"
	bogus := "bogus"

	tests := []struct {
		rg       string              // resource group passed in AccountOptions
		name     string              // share name
		acct     string              // account name ("" lets the cloud pick one)
		acctType string              // requested SKU
		acctKind string              // requested account kind
		loc      string              // requested location
		gb       int                 // requested share size
		accounts []storage.Account   // accounts returned by ListByResourceGroup
		keys     storage.AccountListKeysResult
		err      error               // error injected into CreateFileShare

		expectErr  bool
		expectAcct string
		expectKey  string
	}{
		{
			// Named account with no matching keys: expect an error.
			name:      "foo",
			acct:      "bar",
			acctType:  "type",
			acctKind:  "StorageV2",
			loc:       "eastus",
			gb:        10,
			expectErr: true,
		},
		{
			// No account name and no candidate accounts: expect an error.
			name:      "foo",
			acct:      "",
			acctType:  "type",
			acctKind:  "StorageV2",
			loc:       "eastus",
			gb:        10,
			expectErr: true,
		},
		{
			// Matching account by SKU/kind/location is selected and keyed.
			// NOTE(review): expectKey is "key" while the mocked key value is
			// "foo key" — confirm how EnsureStorageAccount derives the key.
			name:     "foo",
			acct:     "",
			acctType: sku,
			acctKind: kind,
			loc:      location,
			gb:       10,
			accounts: []storage.Account{
				{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Kind: storage.Kind(kind), Location: &location},
			},
			keys: storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{Value: &value},
				},
			},
			expectAcct: "baz",
			expectKey:  "key",
		},
		{
			// Share creation itself fails: error is propagated.
			rg:       "rg",
			name:     "foo",
			acct:     "",
			acctType: sku,
			acctKind: kind,
			loc:      location,
			gb:       10,
			accounts: []storage.Account{
				{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Kind: storage.Kind(kind), Location: &location},
			},
			keys: storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{Value: &value},
				},
			},
			err:       fmt.Errorf("create fileshare error"),
			expectErr: true,
		},
		{
			// Candidate account has a non-matching name: no selection, error.
			name:     "foo",
			acct:     "",
			acctType: sku,
			acctKind: kind,
			loc:      location,
			gb:       10,
			accounts: []storage.Account{
				{Name: &bogus, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &location},
			},
			expectErr: true,
		},
		{
			// Candidate account is in the wrong location: no selection, error.
			name:     "foo",
			acct:     "",
			acctType: sku,
			acctKind: kind,
			loc:      location,
			gb:       10,
			accounts: []storage.Account{
				{Name: &name, Sku: &storage.Sku{Name: storage.SkuName(sku)}, Location: &bogus},
			},
			expectErr: true,
		},
	}

	for _, test := range tests {
		// Fresh mocks per case so expectations do not leak between cases.
		mockFileClient := mockfileclient.NewMockInterface(ctrl)
		cloud.FileClient = mockFileClient
		mockFileClient.EXPECT().CreateFileShare(gomock.Any(), gomock.Any(), gomock.Any()).Return(test.err).AnyTimes()

		mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl)
		cloud.StorageAccountClient = mockStorageAccountsClient
		mockStorageAccountsClient.EXPECT().ListKeys(gomock.Any(), "rg", gomock.Any()).Return(test.keys, nil).AnyTimes()
		mockStorageAccountsClient.EXPECT().ListByResourceGroup(gomock.Any(), "rg").Return(test.accounts, nil).AnyTimes()
		mockStorageAccountsClient.EXPECT().Create(gomock.Any(), "rg", gomock.Any(), gomock.Any()).Return(nil).AnyTimes()

		mockAccount := &AccountOptions{
			Name:          test.acct,
			Type:          test.acctType,
			Kind:          test.acctKind,
			ResourceGroup: test.rg,
			Location:      test.loc,
		}

		mockFileShare := &fileclient.ShareOptions{
			Name:       test.name,
			Protocol:   storage.SMB,
			RequestGiB: test.gb,
		}

		account, key, err := cloud.CreateFileShare(mockAccount, mockFileShare)
		if test.expectErr && err == nil {
			t.Errorf("unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
		if test.expectAcct != account {
			t.Errorf("Expected: %s, got %s", test.expectAcct, account)
		}
		if test.expectKey != key {
			t.Errorf("Expected: %s, got %s", test.expectKey, key)
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// TestDeleteFileShare verifies that Cloud.DeleteFileShare propagates the
// file client's result: nil on success, and the client's error unchanged.
func TestDeleteFileShare(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cloud := &Cloud{}
	// Table of cases: the injected mock error drives the expected outcome.
	tests := []struct {
		rg   string
		acct string
		name string

		err       error
		expectErr bool
	}{
		{
			rg:   "rg",
			acct: "bar",
			name: "foo",

			expectErr: false,
		},
		{
			rg:   "rg",
			acct: "bar",
			name: "",

			err:       fmt.Errorf("delete fileshare error"),
			expectErr: true,
		},
	}

	for _, test := range tests {
		// Fresh mock per case so the Times(1) expectation is scoped to it.
		mockFileClient := mockfileclient.NewMockInterface(ctrl)
		cloud.FileClient = mockFileClient
		mockFileClient.EXPECT().DeleteFileShare(gomock.Any(), gomock.Any(), gomock.Any()).Return(test.err).Times(1)

		err := cloud.DeleteFileShare(test.rg, test.acct, test.name)
		if test.expectErr && err == nil {
			t.Errorf("unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// TestResizeFileShare verifies that Cloud.ResizeFileShare succeeds when the
// underlying file client's ResizeFileShare returns nil.
func TestResizeFileShare(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cloud := &Cloud{}
	// A single shared file-client mock that always succeeds.
	mockFileClient := mockfileclient.NewMockInterface(ctrl)
	mockFileClient.EXPECT().ResizeFileShare(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
	cloud.FileClient = mockFileClient

	tests := []struct {
		rg   string
		acct string
		name string
		gb   int

		expectErr bool
	}{
		{
			rg:   "rg",
			acct: "bar",
			name: "foo",
			gb:   10,

			expectErr: false,
		},
	}

	for _, test := range tests {
		mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl)
		cloud.StorageAccountClient = mockStorageAccountsClient

		err := cloud.ResizeFileShare(test.rg, test.acct, test.name, test.gb)
		if test.expectErr && err == nil {
			t.Errorf("unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// TestGetFileShare verifies that Cloud.GetFileShare succeeds when the
// underlying file client's GetFileShare returns a share without error.
func TestGetFileShare(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cloud := &Cloud{}
	// A single shared file-client mock that always returns an empty share.
	mockFileClient := mockfileclient.NewMockInterface(ctrl)
	mockFileClient.EXPECT().GetFileShare(gomock.Any(), gomock.Any(), gomock.Any()).Return(storage.FileShare{}, nil).AnyTimes()
	cloud.FileClient = mockFileClient

	tests := []struct {
		rg   string
		acct string
		name string

		expectErr bool
	}{
		{
			rg:   "rg",
			acct: "bar",
			name: "foo",

			expectErr: false,
		},
	}

	for _, test := range tests {
		mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl)
		cloud.StorageAccountClient = mockStorageAccountsClient

		// Only the error is asserted; the returned share content is ignored.
		_, err := cloud.GetFileShare(test.rg, test.acct, test.name)
		if test.expectErr && err == nil {
			t.Errorf("unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("unexpected error: %v", err)
			continue
		}
	}
}
 | 
			
		||||
@@ -1,214 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// AccountOptions contains the fields which are used to create storage account.
// Empty string fields act as wildcards when matching existing accounts in
// getStorageAccounts.
type AccountOptions struct {
	// Name, Type (SKU name), Kind, ResourceGroup and Location of the account.
	Name, Type, Kind, ResourceGroup, Location string
	// EnableHTTPSTrafficOnly is passed through to the account-create request.
	EnableHTTPSTrafficOnly                    bool
	// Tags are attached to the storage account on creation.
	Tags                                      map[string]string
	// VirtualNetworkResourceIDs, when non-empty, restricts matching to
	// accounts that allow at least one of these subnet IDs.
	VirtualNetworkResourceIDs                 []string
}
 | 
			
		||||
 | 
			
		||||
// accountWithLocation pairs a storage account name with its SKU name and
// location, as collected by getStorageAccounts.
type accountWithLocation struct {
	Name, StorageType, Location string
}
 | 
			
		||||
 | 
			
		||||
// getStorageAccounts get matching storage accounts
 | 
			
		||||
func (az *Cloud) getStorageAccounts(accountOptions *AccountOptions) ([]accountWithLocation, error) {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	result, rerr := az.StorageAccountClient.ListByResourceGroup(ctx, accountOptions.ResourceGroup)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return nil, rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	accounts := []accountWithLocation{}
 | 
			
		||||
	for _, acct := range result {
 | 
			
		||||
		if acct.Name != nil && acct.Location != nil && acct.Sku != nil {
 | 
			
		||||
			storageType := string((*acct.Sku).Name)
 | 
			
		||||
			if accountOptions.Type != "" && !strings.EqualFold(accountOptions.Type, storageType) {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if accountOptions.Kind != "" && !strings.EqualFold(accountOptions.Kind, string(acct.Kind)) {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			location := *acct.Location
 | 
			
		||||
			if accountOptions.Location != "" && !strings.EqualFold(accountOptions.Location, location) {
 | 
			
		||||
				continue
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			if len(accountOptions.VirtualNetworkResourceIDs) > 0 {
 | 
			
		||||
				if acct.AccountProperties == nil || acct.AccountProperties.NetworkRuleSet == nil ||
 | 
			
		||||
					acct.AccountProperties.NetworkRuleSet.VirtualNetworkRules == nil {
 | 
			
		||||
					continue
 | 
			
		||||
				}
 | 
			
		||||
 | 
			
		||||
				found := false
 | 
			
		||||
				for _, subnetID := range accountOptions.VirtualNetworkResourceIDs {
 | 
			
		||||
					for _, rule := range *acct.AccountProperties.NetworkRuleSet.VirtualNetworkRules {
 | 
			
		||||
						if strings.EqualFold(pointer.StringDeref(rule.VirtualNetworkResourceID, ""), subnetID) && rule.Action == storage.Allow {
 | 
			
		||||
							found = true
 | 
			
		||||
							break
 | 
			
		||||
						}
 | 
			
		||||
					}
 | 
			
		||||
				}
 | 
			
		||||
				if !found {
 | 
			
		||||
					continue
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			accounts = append(accounts, accountWithLocation{Name: *acct.Name, StorageType: storageType, Location: location})
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return accounts, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetStorageAccesskey gets the storage account access key
 | 
			
		||||
func (az *Cloud) GetStorageAccesskey(account, resourceGroup string) (string, error) {
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
 | 
			
		||||
	result, rerr := az.StorageAccountClient.ListKeys(ctx, resourceGroup, account)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return "", rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
	if result.Keys == nil {
 | 
			
		||||
		return "", fmt.Errorf("empty keys")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, k := range *result.Keys {
 | 
			
		||||
		if k.Value != nil && *k.Value != "" {
 | 
			
		||||
			v := *k.Value
 | 
			
		||||
			if ind := strings.LastIndex(v, " "); ind >= 0 {
 | 
			
		||||
				v = v[(ind + 1):]
 | 
			
		||||
			}
 | 
			
		||||
			return v, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
	return "", fmt.Errorf("no valid keys")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// EnsureStorageAccount search storage account, create one storage account(with genAccountNamePrefix) if not found, return accountName, accountKey
 | 
			
		||||
func (az *Cloud) EnsureStorageAccount(accountOptions *AccountOptions, genAccountNamePrefix string) (string, string, error) {
 | 
			
		||||
	if accountOptions == nil {
 | 
			
		||||
		return "", "", fmt.Errorf("account options is nil")
 | 
			
		||||
	}
 | 
			
		||||
	accountName := accountOptions.Name
 | 
			
		||||
	accountType := accountOptions.Type
 | 
			
		||||
	accountKind := accountOptions.Kind
 | 
			
		||||
	resourceGroup := accountOptions.ResourceGroup
 | 
			
		||||
	location := accountOptions.Location
 | 
			
		||||
	enableHTTPSTrafficOnly := accountOptions.EnableHTTPSTrafficOnly
 | 
			
		||||
 | 
			
		||||
	if len(accountName) == 0 {
 | 
			
		||||
		// find a storage account that matches accountType
 | 
			
		||||
		accounts, err := az.getStorageAccounts(accountOptions)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if len(accounts) > 0 {
 | 
			
		||||
			accountName = accounts[0].Name
 | 
			
		||||
			klog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if len(accountName) == 0 {
 | 
			
		||||
			// set network rules for storage account
 | 
			
		||||
			var networkRuleSet *storage.NetworkRuleSet
 | 
			
		||||
			virtualNetworkRules := []storage.VirtualNetworkRule{}
 | 
			
		||||
			for _, subnetID := range accountOptions.VirtualNetworkResourceIDs {
 | 
			
		||||
				vnetRule := storage.VirtualNetworkRule{
 | 
			
		||||
					VirtualNetworkResourceID: &subnetID,
 | 
			
		||||
					Action:                   storage.Allow,
 | 
			
		||||
				}
 | 
			
		||||
				virtualNetworkRules = append(virtualNetworkRules, vnetRule)
 | 
			
		||||
				klog.V(4).Infof("subnetID(%s) has been set", subnetID)
 | 
			
		||||
			}
 | 
			
		||||
			if len(virtualNetworkRules) > 0 {
 | 
			
		||||
				networkRuleSet = &storage.NetworkRuleSet{
 | 
			
		||||
					VirtualNetworkRules: &virtualNetworkRules,
 | 
			
		||||
					DefaultAction:       storage.DefaultActionDeny,
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// not found a matching account, now create a new account in current resource group
 | 
			
		||||
			accountName = generateStorageAccountName(genAccountNamePrefix)
 | 
			
		||||
			if location == "" {
 | 
			
		||||
				location = az.Location
 | 
			
		||||
			}
 | 
			
		||||
			if accountType == "" {
 | 
			
		||||
				accountType = defaultStorageAccountType
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			// use StorageV2 by default per https://docs.microsoft.com/en-us/azure/storage/common/storage-account-options
 | 
			
		||||
			kind := defaultStorageAccountKind
 | 
			
		||||
			if accountKind != "" {
 | 
			
		||||
				kind = storage.Kind(accountKind)
 | 
			
		||||
			}
 | 
			
		||||
			if len(accountOptions.Tags) == 0 {
 | 
			
		||||
				accountOptions.Tags = make(map[string]string)
 | 
			
		||||
			}
 | 
			
		||||
			accountOptions.Tags["created-by"] = "azure"
 | 
			
		||||
			tags := convertMapToMapPointer(accountOptions.Tags)
 | 
			
		||||
 | 
			
		||||
			klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s, tags: %+v",
 | 
			
		||||
				accountName, resourceGroup, location, accountType, kind, accountOptions.Tags)
 | 
			
		||||
 | 
			
		||||
			cp := storage.AccountCreateParameters{
 | 
			
		||||
				Sku:  &storage.Sku{Name: storage.SkuName(accountType)},
 | 
			
		||||
				Kind: kind,
 | 
			
		||||
				AccountPropertiesCreateParameters: &storage.AccountPropertiesCreateParameters{
 | 
			
		||||
					EnableHTTPSTrafficOnly: &enableHTTPSTrafficOnly,
 | 
			
		||||
					NetworkRuleSet:         networkRuleSet,
 | 
			
		||||
				},
 | 
			
		||||
				Tags:     tags,
 | 
			
		||||
				Location: &location}
 | 
			
		||||
 | 
			
		||||
			ctx, cancel := getContextWithCancel()
 | 
			
		||||
			defer cancel()
 | 
			
		||||
			rerr := az.StorageAccountClient.Create(ctx, resourceGroup, accountName, cp)
 | 
			
		||||
			if rerr != nil {
 | 
			
		||||
				return "", "", fmt.Errorf("failed to create storage account %s, error: %v", accountName, rerr)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// find the access key with this account
 | 
			
		||||
	accountKey, err := az.GetStorageAccesskey(accountName, resourceGroup)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return accountName, accountKey, nil
 | 
			
		||||
}
 | 
			
		||||
@@ -1,303 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/storageaccountclient/mockstorageaccountclient"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestGetStorageAccessKeys verifies Cloud.GetStorageAccesskey: a nil key list
// is an error, the first non-empty key value wins, and only the token after
// the last space of the value ("foo bar" -> "bar") is returned.
func TestGetStorageAccessKeys(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cloud := &Cloud{}
	value := "foo bar"

	tests := []struct {
		results     storage.AccountListKeysResult
		expectedKey string
		expectErr   bool
		err         error
	}{
		// No Keys at all -> "empty keys" error.
		{storage.AccountListKeysResult{}, "", true, nil},
		{
			storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{Value: &value},
				},
			},
			"bar",
			false,
			nil,
		},
		// An empty leading entry is skipped; the next valid key is used.
		{
			storage.AccountListKeysResult{
				Keys: &[]storage.AccountKey{
					{},
					{Value: &value},
				},
			},
			"bar",
			false,
			nil,
		},
		{storage.AccountListKeysResult{}, "", true, fmt.Errorf("test error")},
	}

	for _, test := range tests {
		mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl)
		cloud.StorageAccountClient = mockStorageAccountsClient
		mockStorageAccountsClient.EXPECT().ListKeys(gomock.Any(), "rg", gomock.Any()).Return(test.results, nil).AnyTimes()
		key, err := cloud.GetStorageAccesskey("acct", "rg")
		if test.expectErr && err == nil {
			t.Errorf("Unexpected non-error")
			continue
		}
		if !test.expectErr && err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		if key != test.expectedKey {
			t.Errorf("expected: %s, saw %s", test.expectedKey, key)
		}
	}
}
 | 
			
		||||
 | 
			
		||||
func TestGetStorageAccount(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	cloud := &Cloud{}
 | 
			
		||||
 | 
			
		||||
	name := "testAccount"
 | 
			
		||||
	location := "testLocation"
 | 
			
		||||
	networkID := "networkID"
 | 
			
		||||
	accountProperties := storage.AccountProperties{
 | 
			
		||||
		NetworkRuleSet: &storage.NetworkRuleSet{
 | 
			
		||||
			VirtualNetworkRules: &[]storage.VirtualNetworkRule{
 | 
			
		||||
				{
 | 
			
		||||
					VirtualNetworkResourceID: &networkID,
 | 
			
		||||
					Action:                   storage.Allow,
 | 
			
		||||
					State:                    "state",
 | 
			
		||||
				},
 | 
			
		||||
			},
 | 
			
		||||
		}}
 | 
			
		||||
 | 
			
		||||
	account := storage.Account{
 | 
			
		||||
		Sku: &storage.Sku{
 | 
			
		||||
			Name: "testSku",
 | 
			
		||||
			Tier: "testSkuTier",
 | 
			
		||||
		},
 | 
			
		||||
		Kind:              "testKind",
 | 
			
		||||
		Location:          &location,
 | 
			
		||||
		Name:              &name,
 | 
			
		||||
		AccountProperties: &accountProperties,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	testResourceGroups := []storage.Account{account}
 | 
			
		||||
 | 
			
		||||
	accountOptions := &AccountOptions{
 | 
			
		||||
		ResourceGroup:             "rg",
 | 
			
		||||
		VirtualNetworkResourceIDs: []string{networkID},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl)
 | 
			
		||||
	cloud.StorageAccountClient = mockStorageAccountsClient
 | 
			
		||||
 | 
			
		||||
	mockStorageAccountsClient.EXPECT().ListByResourceGroup(gomock.Any(), "rg").Return(testResourceGroups, nil).Times(1)
 | 
			
		||||
 | 
			
		||||
	accountsWithLocations, err := cloud.getStorageAccounts(accountOptions)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		t.Errorf("unexpected error: %v", err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if accountsWithLocations == nil {
 | 
			
		||||
		t.Error("unexpected error as returned accounts are nil")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if len(accountsWithLocations) == 0 {
 | 
			
		||||
		t.Error("unexpected error as returned accounts slice is empty")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	expectedAccountWithLocation := accountWithLocation{
 | 
			
		||||
		Name:        "testAccount",
 | 
			
		||||
		StorageType: "testSku",
 | 
			
		||||
		Location:    "testLocation",
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	accountWithLocation := accountsWithLocations[0]
 | 
			
		||||
	if accountWithLocation.Name != expectedAccountWithLocation.Name {
 | 
			
		||||
		t.Errorf("expected %s, but was %s", accountWithLocation.Name, expectedAccountWithLocation.Name)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if accountWithLocation.StorageType != expectedAccountWithLocation.StorageType {
 | 
			
		||||
		t.Errorf("expected %s, but was %s", accountWithLocation.StorageType, expectedAccountWithLocation.StorageType)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if accountWithLocation.Location != expectedAccountWithLocation.Location {
 | 
			
		||||
		t.Errorf("expected %s, but was %s", accountWithLocation.Location, expectedAccountWithLocation.Location)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestGetStorageAccountEdgeCases drives getStorageAccounts through the cases
// where a listed account must be filtered out: missing name/location/SKU,
// mismatched type/kind/location options, and absent network rule data when a
// virtual-network match is required. Every case expects an empty result.
func TestGetStorageAccountEdgeCases(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	cloud := &Cloud{}

	// default account with name, location, sku, kind
	name := "testAccount"
	location := "testLocation"
	sku := &storage.Sku{
		Name: "testSku",
		Tier: "testSkuTier",
	}
	account := storage.Account{
		Sku:      sku,
		Kind:     "testKind",
		Location: &location,
		Name:     &name,
	}

	// Property variants used by the network-rule edge cases.
	accountPropertiesWithoutNetworkRuleSet := storage.AccountProperties{NetworkRuleSet: nil}
	accountPropertiesWithoutVirtualNetworkRules := storage.AccountProperties{
		NetworkRuleSet: &storage.NetworkRuleSet{
			VirtualNetworkRules: nil,
		}}

	tests := []struct {
		testCase           string
		testAccountOptions *AccountOptions
		testResourceGroups []storage.Account
		expectedResult     []accountWithLocation
		expectedError      error
	}{
		{
			testCase: "account name is nil",
			testAccountOptions: &AccountOptions{
				ResourceGroup: "rg",
			},
			testResourceGroups: []storage.Account{},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account location is nil",
			testAccountOptions: &AccountOptions{
				ResourceGroup: "rg",
			},
			testResourceGroups: []storage.Account{{Name: &name}},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account sku is nil",
			testAccountOptions: &AccountOptions{
				ResourceGroup: "rg",
			},
			testResourceGroups: []storage.Account{{Name: &name, Location: &location}},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account options type is not empty and not equal account storage type",
			testAccountOptions: &AccountOptions{
				ResourceGroup: "rg",
				Type:          "testAccountOptionsType",
			},
			testResourceGroups: []storage.Account{account},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account options kind is not empty and not equal account type",
			testAccountOptions: &AccountOptions{
				ResourceGroup: "rg",
				Kind:          "testAccountOptionsKind",
			},
			testResourceGroups: []storage.Account{account},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account options location is not empty and not equal account location",
			testAccountOptions: &AccountOptions{
				ResourceGroup: "rg",
				Location:      "testAccountOptionsLocation",
			},
			testResourceGroups: []storage.Account{account},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account options account properties are nil",
			testAccountOptions: &AccountOptions{
				ResourceGroup:             "rg",
				VirtualNetworkResourceIDs: []string{"id"},
			},
			testResourceGroups: []storage.Account{},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account options account properties network rule set is nil",
			testAccountOptions: &AccountOptions{
				ResourceGroup:             "rg",
				VirtualNetworkResourceIDs: []string{"id"},
			},
			testResourceGroups: []storage.Account{{Name: &name, Kind: "kind", Location: &location, Sku: sku, AccountProperties: &accountPropertiesWithoutNetworkRuleSet}},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
		{
			testCase: "account options account properties virtual network rule is nil",
			testAccountOptions: &AccountOptions{
				ResourceGroup:             "rg",
				VirtualNetworkResourceIDs: []string{"id"},
			},
			testResourceGroups: []storage.Account{{Name: &name, Kind: "kind", Location: &location, Sku: sku, AccountProperties: &accountPropertiesWithoutVirtualNetworkRules}},
			expectedResult:     []accountWithLocation{},
			expectedError:      nil,
		},
	}

	for _, test := range tests {
		t.Logf("running test case: %s", test.testCase)
		mockStorageAccountsClient := mockstorageaccountclient.NewMockInterface(ctrl)
		cloud.StorageAccountClient = mockStorageAccountsClient

		mockStorageAccountsClient.EXPECT().ListByResourceGroup(gomock.Any(), "rg").Return(test.testResourceGroups, nil).AnyTimes()

		accountsWithLocations, err := cloud.getStorageAccounts(test.testAccountOptions)
		if err != test.expectedError {
			t.Errorf("unexpected error: %v", err)
		}

		// Only the result length is compared; every case expects zero matches.
		if len(accountsWithLocations) != len(test.expectedResult) {
			t.Error("unexpected error as returned accounts slice is not empty")
		}
	}
}
 | 
			
		||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							@@ -1,165 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
	// tagsDelimiter separates key=value pairs in a tags string.
	tagsDelimiter        = ","
	// tagKeyValueDelimiter separates a key from its value within one pair.
	tagKeyValueDelimiter = "="
)
 | 
			
		||||
 | 
			
		||||
// lockMap maintains a mutex per string entry, so callers can serialize work
// on a given key. The embedded Mutex guards the map itself.
type lockMap struct {
	sync.Mutex
	mutexMap map[string]*sync.Mutex
}

// newLockMap returns an empty lock map ready for use.
func newLockMap() *lockMap {
	return &lockMap{mutexMap: make(map[string]*sync.Mutex)}
}

// LockEntry acquires the lock associated with the specific entry, creating
// the entry's mutex on first use.
func (m *lockMap) LockEntry(entry string) {
	m.Lock()
	if m.mutexMap[entry] == nil {
		m.addEntry(entry)
	}
	m.Unlock()
	m.lockEntry(entry)
}

// UnlockEntry releases the lock associated with the specific entry; unknown
// entries are ignored.
func (m *lockMap) UnlockEntry(entry string) {
	m.Lock()
	defer m.Unlock()

	if m.mutexMap[entry] == nil {
		return
	}
	m.unlockEntry(entry)
}

// addEntry registers a fresh mutex for entry; callers hold the map lock.
func (m *lockMap) addEntry(entry string) {
	m.mutexMap[entry] = new(sync.Mutex)
}

// lockEntry locks the per-entry mutex; the entry must already exist.
func (m *lockMap) lockEntry(entry string) {
	mu := m.mutexMap[entry]
	mu.Lock()
}

// unlockEntry unlocks the per-entry mutex; the entry must already exist.
func (m *lockMap) unlockEntry(entry string) {
	mu := m.mutexMap[entry]
	mu.Unlock()
}
 | 
			
		||||
 | 
			
		||||
// getContextWithCancel returns a background-derived context and its cancel
// function; callers must invoke cancel to release the context's resources.
func getContextWithCancel() (context.Context, context.CancelFunc) {
	return context.WithCancel(context.Background())
}
 | 
			
		||||
 | 
			
		||||
// ConvertTagsToMap convert the tags from string to map
 | 
			
		||||
// the valid tags format is "key1=value1,key2=value2", which could be converted to
 | 
			
		||||
// {"key1": "value1", "key2": "value2"}
 | 
			
		||||
func ConvertTagsToMap(tags string) (map[string]string, error) {
 | 
			
		||||
	m := make(map[string]string)
 | 
			
		||||
	if tags == "" {
 | 
			
		||||
		return m, nil
 | 
			
		||||
	}
 | 
			
		||||
	s := strings.Split(tags, tagsDelimiter)
 | 
			
		||||
	for _, tag := range s {
 | 
			
		||||
		kv := strings.Split(tag, tagKeyValueDelimiter)
 | 
			
		||||
		if len(kv) != 2 {
 | 
			
		||||
			return nil, fmt.Errorf("Tags '%s' are invalid, the format should like: 'key1=value1,key2=value2'", tags)
 | 
			
		||||
		}
 | 
			
		||||
		key := strings.TrimSpace(kv[0])
 | 
			
		||||
		if key == "" {
 | 
			
		||||
			return nil, fmt.Errorf("Tags '%s' are invalid, the format should like: 'key1=value1,key2=value2'", tags)
 | 
			
		||||
		}
 | 
			
		||||
		value := strings.TrimSpace(kv[1])
 | 
			
		||||
		m[key] = value
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return m, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// convertMapToMapPointer returns a copy of origin whose values are pointers
// to (copies of) the original values, as required by Azure SDK tag fields.
func convertMapToMapPointer(origin map[string]string) map[string]*string {
	// Pre-size to the known final length to avoid rehashing.
	newly := make(map[string]*string, len(origin))
	for k, v := range origin {
		// Copy the loop variable so each stored pointer is distinct.
		value := v
		newly[k] = &value
	}
	return newly
}
 | 
			
		||||
 | 
			
		||||
func parseTags(tags string) map[string]*string {
 | 
			
		||||
	kvs := strings.Split(tags, ",")
 | 
			
		||||
	formatted := make(map[string]*string)
 | 
			
		||||
	for _, kv := range kvs {
 | 
			
		||||
		res := strings.Split(kv, "=")
 | 
			
		||||
		if len(res) != 2 {
 | 
			
		||||
			klog.Warningf("parseTags: error when parsing key-value pair %s, would ignore this one", kv)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		k, v := strings.TrimSpace(res[0]), strings.TrimSpace(res[1])
 | 
			
		||||
		if k == "" || v == "" {
 | 
			
		||||
			klog.Warningf("parseTags: error when parsing key-value pair %s-%s, would ignore this one", k, v)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
		formatted[strings.ToLower(k)] = pointer.String(v)
 | 
			
		||||
	}
 | 
			
		||||
	return formatted
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// findKeyInMapCaseInsensitive reports whether targetMap contains a key equal
// to key under Unicode case folding, and returns the stored key's original
// spelling when found.
func findKeyInMapCaseInsensitive(targetMap map[string]*string, key string) (bool, string) {
	for candidate := range targetMap {
		if strings.EqualFold(candidate, key) {
			return true, candidate
		}
	}

	return false, ""
}
 | 
			
		||||
 | 
			
		||||
func reconcileTags(currentTagsOnResource, newTags map[string]*string) (reconciledTags map[string]*string, changed bool) {
 | 
			
		||||
	for k, v := range newTags {
 | 
			
		||||
		found, key := findKeyInMapCaseInsensitive(currentTagsOnResource, k)
 | 
			
		||||
		if !found {
 | 
			
		||||
			currentTagsOnResource[k] = v
 | 
			
		||||
			changed = true
 | 
			
		||||
		} else if !strings.EqualFold(pointer.StringDeref(v, ""), pointer.StringDeref(currentTagsOnResource[key], "")) {
 | 
			
		||||
			currentTagsOnResource[key] = v
 | 
			
		||||
			changed = true
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return currentTagsOnResource, changed
 | 
			
		||||
}
 | 
			
		||||
@@ -1,216 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestSimpleLockEntry verifies that locking a fresh entry succeeds without
// blocking.
func TestSimpleLockEntry(t *testing.T) {
	testLockMap := newLockMap()

	callbackChan1 := make(chan interface{})
	go testLockMap.lockAndCallback(t, "entry1", callbackChan1)
	// The callback fires only after LockEntry returns.
	ensureCallbackHappens(t, callbackChan1)
}
 | 
			
		||||
 | 
			
		||||
// TestSimpleLockUnlockEntry verifies a plain lock-then-unlock cycle on a
// single entry.
func TestSimpleLockUnlockEntry(t *testing.T) {
	testLockMap := newLockMap()

	callbackChan1 := make(chan interface{})
	go testLockMap.lockAndCallback(t, "entry1", callbackChan1)
	ensureCallbackHappens(t, callbackChan1)
	testLockMap.UnlockEntry("entry1")
}
 | 
			
		||||
 | 
			
		||||
// TestConcurrentLockEntry verifies that a second lock on the same entry
// blocks until the first holder unlocks it.
func TestConcurrentLockEntry(t *testing.T) {
	testLockMap := newLockMap()

	callbackChan1 := make(chan interface{})
	callbackChan2 := make(chan interface{})

	// First goroutine acquires the entry immediately.
	go testLockMap.lockAndCallback(t, "entry1", callbackChan1)
	ensureCallbackHappens(t, callbackChan1)

	// Second goroutine must block while the entry is held.
	go testLockMap.lockAndCallback(t, "entry1", callbackChan2)
	ensureNoCallback(t, callbackChan2)

	// Releasing the entry lets the second goroutine proceed.
	testLockMap.UnlockEntry("entry1")
	ensureCallbackHappens(t, callbackChan2)
	testLockMap.UnlockEntry("entry1")
}
 | 
			
		||||
 | 
			
		||||
// lockAndCallback locks entry and then signals callbackChan, letting tests
// observe when (or whether) the lock was acquired.
// NOTE(review): the testing.T parameter is currently unused; confirm whether
// it can be dropped alongside its call sites.
func (lm *lockMap) lockAndCallback(t *testing.T, entry string, callbackChan chan<- interface{}) {
	lm.LockEntry(entry)
	callbackChan <- true
}
 | 
			
		||||
 | 
			
		||||
// callbackTimeout bounds how long the ensure* helpers wait on a callback channel.
var callbackTimeout = 2 * time.Second
 | 
			
		||||
 | 
			
		||||
func ensureCallbackHappens(t *testing.T, callbackChan <-chan interface{}) bool {
 | 
			
		||||
	select {
 | 
			
		||||
	case <-callbackChan:
 | 
			
		||||
		return true
 | 
			
		||||
	case <-time.After(callbackTimeout):
 | 
			
		||||
		t.Fatalf("timed out waiting for callback")
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func ensureNoCallback(t *testing.T, callbackChan <-chan interface{}) bool {
 | 
			
		||||
	select {
 | 
			
		||||
	case <-callbackChan:
 | 
			
		||||
		t.Fatalf("unexpected callback")
 | 
			
		||||
		return false
 | 
			
		||||
	case <-time.After(callbackTimeout):
 | 
			
		||||
		return true
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestConvertTagsToMap(t *testing.T) {
 | 
			
		||||
	testCases := []struct {
 | 
			
		||||
		desc           string
 | 
			
		||||
		tags           string
 | 
			
		||||
		expectedOutput map[string]string
 | 
			
		||||
		expectedError  bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:           "should return empty map when tag is empty",
 | 
			
		||||
			tags:           "",
 | 
			
		||||
			expectedOutput: map[string]string{},
 | 
			
		||||
			expectedError:  false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc: "sing valid tag should be converted",
 | 
			
		||||
			tags: "key=value",
 | 
			
		||||
			expectedOutput: map[string]string{
 | 
			
		||||
				"key": "value",
 | 
			
		||||
			},
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc: "multiple valid tags should be converted",
 | 
			
		||||
			tags: "key1=value1,key2=value2",
 | 
			
		||||
			expectedOutput: map[string]string{
 | 
			
		||||
				"key1": "value1",
 | 
			
		||||
				"key2": "value2",
 | 
			
		||||
			},
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc: "whitespaces should be trimmed",
 | 
			
		||||
			tags: "key1=value1, key2=value2",
 | 
			
		||||
			expectedOutput: map[string]string{
 | 
			
		||||
				"key1": "value1",
 | 
			
		||||
				"key2": "value2",
 | 
			
		||||
			},
 | 
			
		||||
			expectedError: false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:           "should return error for invalid format",
 | 
			
		||||
			tags:           "foo,bar",
 | 
			
		||||
			expectedOutput: nil,
 | 
			
		||||
			expectedError:  true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:           "should return error for when key is missed",
 | 
			
		||||
			tags:           "key1=value1,=bar",
 | 
			
		||||
			expectedOutput: nil,
 | 
			
		||||
			expectedError:  true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, c := range testCases {
 | 
			
		||||
		m, err := ConvertTagsToMap(c.tags)
 | 
			
		||||
		if c.expectedError {
 | 
			
		||||
			assert.NotNil(t, err, "TestCase[%d]: %s", i, c.desc)
 | 
			
		||||
		} else {
 | 
			
		||||
			assert.Nil(t, err, "TestCase[%d]: %s", i, c.desc)
 | 
			
		||||
			if !reflect.DeepEqual(m, c.expectedOutput) {
 | 
			
		||||
				t.Errorf("got: %v, expected: %v, desc: %v", m, c.expectedOutput, c.desc)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestReconcileTags covers tag merging: additions/updates report a change,
// while differences only in key or value case leave the resource untouched.
// Cases that omit expectedChanged rely on its zero value (false).
func TestReconcileTags(t *testing.T) {
	for _, testCase := range []struct {
		description                                  string
		currentTagsOnResource, newTags, expectedTags map[string]*string
		expectedChanged                              bool
	}{
		{
			description: "reconcileTags should add missing tags and update existing tags",
			currentTagsOnResource: map[string]*string{
				"a": pointer.String("b"),
			},
			newTags: map[string]*string{
				"a": pointer.String("c"),
				"b": pointer.String("d"),
			},
			expectedTags: map[string]*string{
				"a": pointer.String("c"),
				"b": pointer.String("d"),
			},
			expectedChanged: true,
		},
		{
			description: "reconcileTags should ignore the case of keys when comparing",
			currentTagsOnResource: map[string]*string{
				"A": pointer.String("b"),
				"c": pointer.String("d"),
			},
			newTags: map[string]*string{
				"a": pointer.String("b"),
				"C": pointer.String("d"),
			},
			expectedTags: map[string]*string{
				"A": pointer.String("b"),
				"c": pointer.String("d"),
			},
		},
		{
			description: "reconcileTags should ignore the case of values when comparing",
			currentTagsOnResource: map[string]*string{
				"A": pointer.String("b"),
				"c": pointer.String("d"),
			},
			newTags: map[string]*string{
				"a": pointer.String("B"),
				"C": pointer.String("D"),
			},
			expectedTags: map[string]*string{
				"A": pointer.String("b"),
				"c": pointer.String("d"),
			},
		},
	} {
		t.Run(testCase.description, func(t *testing.T) {
			tags, changed := reconcileTags(testCase.currentTagsOnResource, testCase.newTags)
			assert.Equal(t, testCase.expectedChanged, changed)
			assert.Equal(t, testCase.expectedTags, tags)
		})
	}
}
 | 
			
		||||
@@ -1,88 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
//go:generate mockgen -copyright_file=$BUILD_TAG_FILE -source=azure_vmsets.go  -destination=mockvmsets/azure_mock_vmsets.go -package=mockvmsets VMSet
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
 | 
			
		||||
 | 
			
		||||
	v1 "k8s.io/api/core/v1"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// VMSet defines the functions that all vmsets (including scale set and
// availability set) should implement.
type VMSet interface {
	// GetInstanceIDByNodeName gets the cloud provider ID by node name.
	// It must return ("", cloudprovider.InstanceNotFound) if the instance does
	// not exist or is no longer running.
	GetInstanceIDByNodeName(name string) (string, error)
	// GetInstanceTypeByNodeName gets the instance type by node name.
	GetInstanceTypeByNodeName(name string) (string, error)
	// GetIPByNodeName gets machine private IP and public IP by node name.
	GetIPByNodeName(name string) (string, string, error)
	// GetPrimaryInterface gets machine primary network interface by node name.
	GetPrimaryInterface(nodeName string) (network.Interface, error)
	// GetNodeNameByProviderID gets the node name by provider ID.
	GetNodeNameByProviderID(providerID string) (types.NodeName, error)

	// GetZoneByNodeName gets cloudprovider.Zone by node name.
	GetZoneByNodeName(name string) (cloudprovider.Zone, error)

	// GetPrimaryVMSetName returns the VM set name depending on the configured vmType.
	// It returns config.PrimaryScaleSetName for vmss and config.PrimaryAvailabilitySetName for standard vmType.
	GetPrimaryVMSetName() string
	// GetVMSetNames selects all possible availability sets or scale sets
	// (depending vmType configured) for service load balancer, if the service has
	// no loadbalancer mode annotation returns the primary VMSet. If service annotation
	// for loadbalancer exists then return the eligible VMSet.
	GetVMSetNames(service *v1.Service, nodes []*v1.Node) (availabilitySetNames *[]string, err error)
	// EnsureHostsInPool ensures the given Node's primary IP configurations are
	// participating in the specified LoadBalancer Backend Pool.
	EnsureHostsInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string, isInternal bool) error
	// EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
	// participating in the specified LoadBalancer Backend Pool.
	EnsureHostInPool(service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string, isInternal bool) (string, string, string, *compute.VirtualMachineScaleSetVM, error)
	// EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes.
	EnsureBackendPoolDeleted(service *v1.Service, backendPoolID, vmSetName string, backendAddressPools *[]network.BackendAddressPool, deleteFromVMSet bool) error

	// AttachDisk attaches a vhd to vm. The vhd must exist, can be identified by diskName, diskURI, and lun.
	AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes, diskEncryptionSetID string, writeAcceleratorEnabled bool) error
	// DetachDisk detaches a vhd from host. The vhd can be identified by diskName or diskURI.
	DetachDisk(diskName, diskURI string, nodeName types.NodeName) error
	// GetDataDisks gets a list of data disks attached to the node.
	GetDataDisks(nodeName types.NodeName, crt azcache.AzureCacheReadType) ([]compute.DataDisk, error)

	// GetPowerStatusByNodeName returns the power state of the specified node.
	GetPowerStatusByNodeName(name string) (string, error)

	// GetProvisioningStateByNodeName returns the provisioningState for the specified node.
	GetProvisioningStateByNodeName(name string) (string, error)

	// GetPrivateIPsByNodeName returns a slice of all private ips assigned to node (ipv6 and ipv4)
	GetPrivateIPsByNodeName(name string) ([]string, error)

	// GetNodeNameByIPConfigurationID gets the nodeName and vmSetName by IP configuration ID.
	GetNodeNameByIPConfigurationID(ipConfigurationID string) (string, string, error)
}
 | 
			
		||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							@@ -1,350 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/sets"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// vmssNameSeparator separates the scale-set name from the instance ID in
	// a VMSS VM name (see extractVmssVMName).
	vmssNameSeparator = "_"

	// Keys under which the VMSS cache and the availability-set-nodes cache
	// store their single entry.
	vmssKey                 = "k8svmssKey"
	availabilitySetNodesKey = "k8sAvailabilitySetNodesKey"

	// Default TTLs (in seconds) applied when the corresponding config values
	// are left at zero.
	availabilitySetNodesCacheTTLDefaultInSeconds = 900
	vmssCacheTTLDefaultInSeconds                 = 600
	vmssVirtualMachinesCacheTTLDefaultInSeconds  = 600
)
 | 
			
		||||
 | 
			
		||||
// vmssVirtualMachinesEntry is the per-node payload of the VMSS virtual
// machines cache.
type vmssVirtualMachinesEntry struct {
	resourceGroup string
	vmssName      string
	instanceID    string
	// virtualMachine is nil for placeholder entries (e.g. a VM that is being
	// deleted — see newVMSSVirtualMachinesCache).
	virtualMachine *compute.VirtualMachineScaleSetVM
	// lastUpdate records when this entry was (re)built, in UTC.
	lastUpdate time.Time
}
 | 
			
		||||
 | 
			
		||||
// vmssEntry is the per-scale-set payload of the VMSS cache.
type vmssEntry struct {
	vmss          *compute.VirtualMachineScaleSet
	resourceGroup string
	// lastUpdate records when this entry was built, in UTC.
	lastUpdate time.Time
}
 | 
			
		||||
 | 
			
		||||
// availabilitySetEntry caches the names of all availability-set VMs together
// with the cluster's node names at the time the cache data was created.
type availabilitySetEntry struct {
	vmNames   sets.String
	nodeNames sets.String
}
 | 
			
		||||
 | 
			
		||||
// newVMSSCache builds the timed cache that maps VMSS name to *vmssEntry,
// refreshed by listing all scale sets across all of the cloud's resource
// groups. TTL comes from config, falling back to vmssCacheTTLDefaultInSeconds.
func (ss *scaleSet) newVMSSCache() (*azcache.TimedCache, error) {
	getter := func(key string) (interface{}, error) {
		localCache := &sync.Map{} // [vmssName]*vmssEntry

		allResourceGroups, err := ss.GetResourceGroups()
		if err != nil {
			return nil, err
		}

		for _, resourceGroup := range allResourceGroups.List() {
			allScaleSets, rerr := ss.VirtualMachineScaleSetsClient.List(context.Background(), resourceGroup)
			if rerr != nil {
				klog.Errorf("VirtualMachineScaleSetsClient.List failed: %v", rerr)
				return nil, rerr.Error()
			}

			for i := range allScaleSets {
				// Index (rather than range-value) so &scaleSet below points at
				// a fresh copy per iteration.
				scaleSet := allScaleSets[i]
				if scaleSet.Name == nil || *scaleSet.Name == "" {
					klog.Warning("failed to get the name of VMSS")
					continue
				}
				localCache.Store(*scaleSet.Name, &vmssEntry{
					vmss:          &scaleSet,
					resourceGroup: resourceGroup,
					lastUpdate:    time.Now().UTC(),
				})
			}
		}

		return localCache, nil
	}

	// Zero means "unset": apply the package default TTL.
	if ss.Config.VmssCacheTTLInSeconds == 0 {
		ss.Config.VmssCacheTTLInSeconds = vmssCacheTTLDefaultInSeconds
	}
	return azcache.NewTimedcache(time.Duration(ss.Config.VmssCacheTTLInSeconds)*time.Second, getter)
}
 | 
			
		||||
 | 
			
		||||
func extractVmssVMName(name string) (string, string, error) {
 | 
			
		||||
	split := strings.SplitAfter(name, vmssNameSeparator)
 | 
			
		||||
	if len(split) < 2 {
 | 
			
		||||
		klog.V(3).Infof("Failed to extract vmssVMName %q", name)
 | 
			
		||||
		return "", "", ErrorNotVmssInstance
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ssName := strings.Join(split[0:len(split)-1], "")
 | 
			
		||||
	// removing the trailing `vmssNameSeparator` since we used SplitAfter
 | 
			
		||||
	ssName = ssName[:len(ssName)-1]
 | 
			
		||||
	instanceID := split[len(split)-1]
 | 
			
		||||
	return ssName, instanceID, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getVMSSVMCache returns an *azcache.TimedCache and cache key for a VMSS (creating that cache if new).
 | 
			
		||||
func (ss *scaleSet) getVMSSVMCache(resourceGroup, vmssName string) (string, *azcache.TimedCache, error) {
 | 
			
		||||
	cacheKey := strings.ToLower(fmt.Sprintf("%s/%s", resourceGroup, vmssName))
 | 
			
		||||
	if entry, ok := ss.vmssVMCache.Load(cacheKey); ok {
 | 
			
		||||
		cache := entry.(*azcache.TimedCache)
 | 
			
		||||
		return cacheKey, cache, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cache, err := ss.newVMSSVirtualMachinesCache(resourceGroup, vmssName, cacheKey)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return "", nil, err
 | 
			
		||||
	}
 | 
			
		||||
	ss.vmssVMCache.Store(cacheKey, cache)
 | 
			
		||||
	return cacheKey, cache, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// gcVMSSVMCache deletes stale VMSS VMs caches from deleted VMSSes, i.e. any
// per-VMSS VM cache whose scale set no longer appears in the VMSS cache.
func (ss *scaleSet) gcVMSSVMCache() error {
	// Unsafe read: stale data is acceptable for garbage collection.
	cached, err := ss.vmssCache.Get(vmssKey, azcache.CacheReadTypeUnsafe)
	if err != nil {
		return err
	}

	vmsses := cached.(*sync.Map)
	removed := map[string]bool{}
	ss.vmssVMCache.Range(func(key, value interface{}) bool {
		cacheKey := key.(string)
		// cacheKey is "<resourceGroup>/<vmssName>"; recover the VMSS name.
		// NOTE(review): cacheKey is lower-cased by getVMSSVMCache while the
		// VMSS cache is keyed by the scale set's original name — verify that
		// mixed-case VMSS names cannot be wrongly collected here.
		vlistIdx := cacheKey[strings.LastIndex(cacheKey, "/")+1:]
		if _, ok := vmsses.Load(vlistIdx); !ok {
			removed[cacheKey] = true
		}
		return true
	})

	// Delete outside Range to avoid mutating the map mid-iteration.
	for key := range removed {
		ss.vmssVMCache.Delete(key)
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS.
// The cache maps lower-cased computer name to *vmssVirtualMachinesEntry. On
// refresh it lists the scale set's VMs, and re-adds (as nil-VM placeholders)
// any previously-cached nodes missing from the listing so that a transient
// listing gap does not trigger aggressive ARM calls.
func (ss *scaleSet) newVMSSVirtualMachinesCache(resourceGroupName, vmssName, cacheKey string) (*azcache.TimedCache, error) {
	vmssVirtualMachinesCacheTTL := time.Duration(ss.Config.VmssVirtualMachinesCacheTTLInSeconds) * time.Second

	getter := func(key string) (interface{}, error) {
		localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry

		// Snapshot of the previous cache contents, keyed by node name.
		oldCache := make(map[string]vmssVirtualMachinesEntry)

		if vmssCache, ok := ss.vmssVMCache.Load(cacheKey); ok {
			// get old cache before refreshing the cache
			cache := vmssCache.(*azcache.TimedCache)
			entry, exists, err := cache.Store.GetByKey(cacheKey)
			if err != nil {
				return nil, err
			}
			if exists {
				cached := entry.(*azcache.AzureCacheEntry).Data
				if cached != nil {
					virtualMachines := cached.(*sync.Map)
					virtualMachines.Range(func(key, value interface{}) bool {
						oldCache[key.(string)] = *value.(*vmssVirtualMachinesEntry)
						return true
					})
				}
			}
		}

		vms, err := ss.listScaleSetVMs(vmssName, resourceGroupName)
		if err != nil {
			return nil, err
		}

		for i := range vms {
			vm := vms[i]
			if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil {
				klog.Warningf("failed to get computerName for vmssVM (%q)", vmssName)
				continue
			}

			computerName := strings.ToLower(*vm.OsProfile.ComputerName)
			if vm.NetworkProfile == nil || vm.NetworkProfile.NetworkInterfaces == nil {
				klog.Warningf("skip caching vmssVM %s since its network profile hasn't initialized yet (probably still under creating)", computerName)
				continue
			}

			vmssVMCacheEntry := &vmssVirtualMachinesEntry{
				resourceGroup:  resourceGroupName,
				vmssName:       vmssName,
				instanceID:     pointer.StringDeref(vm.InstanceID, ""),
				virtualMachine: &vm,
				lastUpdate:     time.Now().UTC(),
			}
			// set cache entry to nil when the VM is under deleting.
			if vm.VirtualMachineScaleSetVMProperties != nil &&
				strings.EqualFold(pointer.StringDeref(vm.VirtualMachineScaleSetVMProperties.ProvisioningState, ""), string(compute.ProvisioningStateDeleting)) {
				klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName)
				vmssVMCacheEntry.virtualMachine = nil
			}
			localCache.Store(computerName, vmssVMCacheEntry)

			// Anything left in oldCache afterwards was not in the listing.
			delete(oldCache, computerName)
		}

		// add old missing cache data with nil entries to prevent aggressive
		// ARM calls during cache invalidation
		for name, vmEntry := range oldCache {
			// if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache
			// then it should not be added back to the cache
			if vmEntry.virtualMachine == nil && time.Since(vmEntry.lastUpdate) > vmssVirtualMachinesCacheTTL {
				klog.V(5).Infof("ignoring expired entries from old cache for %s", name)
				continue
			}
			lastUpdate := time.Now().UTC()
			if vmEntry.virtualMachine == nil {
				// if this is already a nil entry then keep the time the nil
				// entry was first created, so we can cleanup unwanted entries
				lastUpdate = vmEntry.lastUpdate
			}

			klog.V(5).Infof("adding old entries to new cache for %s", name)
			localCache.Store(name, &vmssVirtualMachinesEntry{
				resourceGroup:  vmEntry.resourceGroup,
				vmssName:       vmEntry.vmssName,
				instanceID:     vmEntry.instanceID,
				virtualMachine: nil,
				lastUpdate:     lastUpdate,
			})
		}

		return localCache, nil
	}

	return azcache.NewTimedcache(vmssVirtualMachinesCacheTTL, getter)
}
 | 
			
		||||
 | 
			
		||||
// deleteCacheForNode removes nodeName's entry from its VMSS VM cache and then
// garbage-collects caches of scale sets that no longer exist. A gc failure is
// logged but does not fail the call.
func (ss *scaleSet) deleteCacheForNode(nodeName string) error {
	// Resolve which resource group / scale set the node belongs to.
	node, err := ss.getNodeIdentityByNodeName(nodeName, azcache.CacheReadTypeUnsafe)
	if err != nil {
		klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
		return err
	}

	cacheKey, timedcache, err := ss.getVMSSVMCache(node.resourceGroup, node.vmssName)
	if err != nil {
		klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
		return err
	}

	vmcache, err := timedcache.Get(cacheKey, azcache.CacheReadTypeUnsafe)
	if err != nil {
		klog.Errorf("deleteCacheForNode(%s) failed with error: %v", nodeName, err)
		return err
	}
	virtualMachines := vmcache.(*sync.Map)
	virtualMachines.Delete(nodeName)

	// Best-effort cleanup of caches belonging to deleted VMSSes.
	if err := ss.gcVMSSVMCache(); err != nil {
		klog.Errorf("deleteCacheForNode(%s) failed to gc stale vmss caches: %v", nodeName, err)
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
func (ss *scaleSet) newAvailabilitySetNodesCache() (*azcache.TimedCache, error) {
 | 
			
		||||
	getter := func(key string) (interface{}, error) {
 | 
			
		||||
		vmNames := sets.NewString()
 | 
			
		||||
		resourceGroups, err := ss.GetResourceGroups()
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		for _, resourceGroup := range resourceGroups.List() {
 | 
			
		||||
			vmList, err := ss.Cloud.ListVirtualMachines(resourceGroup)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return nil, err
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			for _, vm := range vmList {
 | 
			
		||||
				if vm.Name != nil {
 | 
			
		||||
					vmNames.Insert(*vm.Name)
 | 
			
		||||
				}
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		// store all the node names in the cluster when the cache data was created.
 | 
			
		||||
		nodeNames, err := ss.GetNodeNames()
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		localCache := availabilitySetEntry{
 | 
			
		||||
			vmNames:   vmNames,
 | 
			
		||||
			nodeNames: nodeNames,
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return localCache, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if ss.Config.AvailabilitySetNodesCacheTTLInSeconds == 0 {
 | 
			
		||||
		ss.Config.AvailabilitySetNodesCacheTTLInSeconds = availabilitySetNodesCacheTTLDefaultInSeconds
 | 
			
		||||
	}
 | 
			
		||||
	return azcache.NewTimedcache(time.Duration(ss.Config.AvailabilitySetNodesCacheTTLInSeconds)*time.Second, getter)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string, crt azcache.AzureCacheReadType) (bool, error) {
 | 
			
		||||
	// Assume all nodes are managed by VMSS when DisableAvailabilitySetNodes is enabled.
 | 
			
		||||
	if ss.DisableAvailabilitySetNodes {
 | 
			
		||||
		klog.V(2).Infof("Assuming node %q is managed by VMSS since DisableAvailabilitySetNodes is set to true", nodeName)
 | 
			
		||||
		return false, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return false, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cachedNodes := cached.(availabilitySetEntry).nodeNames
 | 
			
		||||
	// if the node is not in the cache, assume the node has joined after the last cache refresh and attempt to refresh the cache.
 | 
			
		||||
	if !cachedNodes.Has(nodeName) {
 | 
			
		||||
		klog.V(2).Infof("Node %s has joined the cluster since the last VM cache refresh, refreshing the cache", nodeName)
 | 
			
		||||
		cached, err = ss.availabilitySetNodesCache.Get(availabilitySetNodesKey, azcache.CacheReadTypeForceRefresh)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return false, err
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	cachedVMs := cached.(availabilitySetEntry).vmNames
 | 
			
		||||
	return cachedVMs.Has(nodeName), nil
 | 
			
		||||
}
 | 
			
		||||
@@ -1,169 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"sync"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestExtractVmssVMName(t *testing.T) {
 | 
			
		||||
	cases := []struct {
 | 
			
		||||
		description        string
 | 
			
		||||
		vmName             string
 | 
			
		||||
		expectError        bool
 | 
			
		||||
		expectedScaleSet   string
 | 
			
		||||
		expectedInstanceID string
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			description: "wrong vmss VM name should report error",
 | 
			
		||||
			vmName:      "vm1234",
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			description: "wrong VM name separator should report error",
 | 
			
		||||
			vmName:      "vm-1234",
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			description:        "correct vmss VM name should return correct scaleSet and instanceID",
 | 
			
		||||
			vmName:             "vm_1234",
 | 
			
		||||
			expectedScaleSet:   "vm",
 | 
			
		||||
			expectedInstanceID: "1234",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			description:        "correct vmss VM name with Extra Separator should return correct scaleSet and instanceID",
 | 
			
		||||
			vmName:             "vm_test_1234",
 | 
			
		||||
			expectedScaleSet:   "vm_test",
 | 
			
		||||
			expectedInstanceID: "1234",
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, c := range cases {
 | 
			
		||||
		ssName, instanceID, err := extractVmssVMName(c.vmName)
 | 
			
		||||
		if c.expectError {
 | 
			
		||||
			assert.Error(t, err, c.description)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		assert.Equal(t, c.expectedScaleSet, ssName, c.description)
 | 
			
		||||
		assert.Equal(t, c.expectedInstanceID, instanceID, c.description)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestVMSSVMCache(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	vmssName := "vmss"
 | 
			
		||||
	vmList := []string{"vmssee6c2000000", "vmssee6c2000001", "vmssee6c2000002"}
 | 
			
		||||
	ss, err := newTestScaleSet(ctrl)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
 | 
			
		||||
	mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
 | 
			
		||||
	ss.cloud.VirtualMachineScaleSetsClient = mockVMSSClient
 | 
			
		||||
	ss.cloud.VirtualMachineScaleSetVMsClient = mockVMSSVMClient
 | 
			
		||||
 | 
			
		||||
	expectedScaleSet := buildTestVMSS(vmssName, "vmssee6c2")
 | 
			
		||||
	mockVMSSClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]compute.VirtualMachineScaleSet{expectedScaleSet}, nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
	expectedVMs, _, _ := buildTestVirtualMachineEnv(ss.cloud, vmssName, "", 0, vmList, "", false)
 | 
			
		||||
	mockVMSSVMClient.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedVMs, nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
	// validate getting VMSS VM via cache.
 | 
			
		||||
	for i := range expectedVMs {
 | 
			
		||||
		vm := expectedVMs[i]
 | 
			
		||||
		vmName := pointer.StringDeref(vm.OsProfile.ComputerName, "")
 | 
			
		||||
		ssName, instanceID, realVM, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
 | 
			
		||||
		assert.NoError(t, err)
 | 
			
		||||
		assert.Equal(t, "vmss", ssName)
 | 
			
		||||
		assert.Equal(t, pointer.StringDeref(vm.InstanceID, ""), instanceID)
 | 
			
		||||
		assert.Equal(t, &vm, realVM)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// validate deleteCacheForNode().
 | 
			
		||||
	vm := expectedVMs[0]
 | 
			
		||||
	vmName := pointer.StringDeref(vm.OsProfile.ComputerName, "")
 | 
			
		||||
	err = ss.deleteCacheForNode(vmName)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	// the VM should be removed from cache after deleteCacheForNode().
 | 
			
		||||
	cacheKey, cache, err := ss.getVMSSVMCache("rg", vmssName)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	cached, err := cache.Get(cacheKey, azcache.CacheReadTypeDefault)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	cachedVirtualMachines := cached.(*sync.Map)
 | 
			
		||||
	_, ok := cachedVirtualMachines.Load(vmName)
 | 
			
		||||
	assert.Equal(t, false, ok)
 | 
			
		||||
 | 
			
		||||
	// the VM should be back after another cache refresh.
 | 
			
		||||
	ssName, instanceID, realVM, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, "vmss", ssName)
 | 
			
		||||
	assert.Equal(t, pointer.StringDeref(vm.InstanceID, ""), instanceID)
 | 
			
		||||
	assert.Equal(t, &vm, realVM)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestVMSSVMCacheWithDeletingNodes(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	vmssName := "vmss"
 | 
			
		||||
	vmList := []string{"vmssee6c2000000", "vmssee6c2000001", "vmssee6c2000002"}
 | 
			
		||||
	ss, err := newTestScaleSetWithState(ctrl)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
 | 
			
		||||
	mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
 | 
			
		||||
	ss.cloud.VirtualMachineScaleSetsClient = mockVMSSClient
 | 
			
		||||
	ss.cloud.VirtualMachineScaleSetVMsClient = mockVMSSVMClient
 | 
			
		||||
 | 
			
		||||
	expectedScaleSet := compute.VirtualMachineScaleSet{
 | 
			
		||||
		Name:                             &vmssName,
 | 
			
		||||
		VirtualMachineScaleSetProperties: &compute.VirtualMachineScaleSetProperties{},
 | 
			
		||||
	}
 | 
			
		||||
	mockVMSSClient.EXPECT().List(gomock.Any(), gomock.Any()).Return([]compute.VirtualMachineScaleSet{expectedScaleSet}, nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
	expectedVMs, _, _ := buildTestVirtualMachineEnv(ss.cloud, vmssName, "", 0, vmList, string(compute.ProvisioningStateDeleting), false)
 | 
			
		||||
	mockVMSSVMClient.EXPECT().List(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedVMs, nil).AnyTimes()
 | 
			
		||||
 | 
			
		||||
	for i := range expectedVMs {
 | 
			
		||||
		vm := expectedVMs[i]
 | 
			
		||||
		vmName := pointer.StringDeref(vm.OsProfile.ComputerName, "")
 | 
			
		||||
		assert.Equal(t, vm.ProvisioningState, pointer.String(string(compute.ProvisioningStateDeleting)))
 | 
			
		||||
 | 
			
		||||
		ssName, instanceID, realVM, err := ss.getVmssVM(vmName, azcache.CacheReadTypeDefault)
 | 
			
		||||
		assert.Nil(t, realVM)
 | 
			
		||||
		assert.Equal(t, "", ssName)
 | 
			
		||||
		assert.Equal(t, instanceID, ssName)
 | 
			
		||||
		assert.Equal(t, cloudprovider.InstanceNotFound, err)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							@@ -1,356 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"regexp"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
 | 
			
		||||
	vmCacheTTLDefaultInSeconds           = 60
 | 
			
		||||
	loadBalancerCacheTTLDefaultInSeconds = 120
 | 
			
		||||
	nsgCacheTTLDefaultInSeconds          = 120
 | 
			
		||||
	routeTableCacheTTLDefaultInSeconds   = 120
 | 
			
		||||
 | 
			
		||||
	azureNodeProviderIDRE    = regexp.MustCompile(`^azure:///subscriptions/(?:.*)/resourceGroups/(?:.*)/providers/Microsoft.Compute/(?:.*)`)
 | 
			
		||||
	azureResourceGroupNameRE = regexp.MustCompile(`.*/subscriptions/(?:.*)/resourceGroups/(.+)/providers/(?:.*)`)
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// checkExistsFromError inspects an error and returns a true if err is nil,
 | 
			
		||||
// false if error is an autorest.Error with StatusCode=404 and will return the
 | 
			
		||||
// error back if error is another status code or another type of error.
 | 
			
		||||
func checkResourceExistsFromError(err *retry.Error) (bool, *retry.Error) {
 | 
			
		||||
	if err == nil {
 | 
			
		||||
		return true, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if err.HTTPStatusCode == http.StatusNotFound {
 | 
			
		||||
		return false, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return false, err
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// / getVirtualMachine calls 'VirtualMachinesClient.Get' with a timed cache
 | 
			
		||||
// / The service side has throttling control that delays responses if there are multiple requests onto certain vm
 | 
			
		||||
// / resource request in short period.
 | 
			
		||||
func (az *Cloud) getVirtualMachine(nodeName types.NodeName, crt azcache.AzureCacheReadType) (vm compute.VirtualMachine, err error) {
 | 
			
		||||
	vmName := string(nodeName)
 | 
			
		||||
	cachedVM, err := az.vmCache.Get(vmName, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return vm, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if cachedVM == nil {
 | 
			
		||||
		return vm, cloudprovider.InstanceNotFound
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return *(cachedVM.(*compute.VirtualMachine)), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getRouteTable(crt azcache.AzureCacheReadType) (routeTable network.RouteTable, exists bool, err error) {
 | 
			
		||||
	cachedRt, err := az.rtCache.Get(az.RouteTableName, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return routeTable, false, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if cachedRt == nil {
 | 
			
		||||
		return routeTable, false, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return *(cachedRt.(*network.RouteTable)), true, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getPublicIPAddress(pipResourceGroup string, pipName string) (network.PublicIPAddress, bool, error) {
 | 
			
		||||
	resourceGroup := az.ResourceGroup
 | 
			
		||||
	if pipResourceGroup != "" {
 | 
			
		||||
		resourceGroup = pipResourceGroup
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	pip, err := az.PublicIPAddressesClient.Get(ctx, resourceGroup, pipName, "")
 | 
			
		||||
	exists, rerr := checkResourceExistsFromError(err)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return pip, false, rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if !exists {
 | 
			
		||||
		klog.V(2).Infof("Public IP %q not found", pipName)
 | 
			
		||||
		return pip, false, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return pip, exists, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getSubnet(virtualNetworkName string, subnetName string) (network.Subnet, bool, error) {
 | 
			
		||||
	var rg string
 | 
			
		||||
	if len(az.VnetResourceGroup) > 0 {
 | 
			
		||||
		rg = az.VnetResourceGroup
 | 
			
		||||
	} else {
 | 
			
		||||
		rg = az.ResourceGroup
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx, cancel := getContextWithCancel()
 | 
			
		||||
	defer cancel()
 | 
			
		||||
	subnet, err := az.SubnetsClient.Get(ctx, rg, virtualNetworkName, subnetName, "")
 | 
			
		||||
	exists, rerr := checkResourceExistsFromError(err)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		return subnet, false, rerr.Error()
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if !exists {
 | 
			
		||||
		klog.V(2).Infof("Subnet %q not found", subnetName)
 | 
			
		||||
		return subnet, false, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return subnet, exists, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getAzureLoadBalancer(name string, crt azcache.AzureCacheReadType) (lb network.LoadBalancer, exists bool, err error) {
 | 
			
		||||
	cachedLB, err := az.lbCache.Get(name, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return lb, false, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if cachedLB == nil {
 | 
			
		||||
		return lb, false, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return *(cachedLB.(*network.LoadBalancer)), true, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) getSecurityGroup(crt azcache.AzureCacheReadType) (network.SecurityGroup, error) {
 | 
			
		||||
	nsg := network.SecurityGroup{}
 | 
			
		||||
	if az.SecurityGroupName == "" {
 | 
			
		||||
		return nsg, fmt.Errorf("securityGroupName is not configured")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	securityGroup, err := az.nsgCache.Get(az.SecurityGroupName, crt)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nsg, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if securityGroup == nil {
 | 
			
		||||
		return nsg, fmt.Errorf("nsg %q not found", az.SecurityGroupName)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return *(securityGroup.(*network.SecurityGroup)), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) newVMCache() (*azcache.TimedCache, error) {
 | 
			
		||||
	getter := func(key string) (interface{}, error) {
 | 
			
		||||
		// Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView
 | 
			
		||||
		// request. If we first send an InstanceView request and then a non InstanceView request, the second
 | 
			
		||||
		// request will still hit throttling. This is what happens now for cloud controller manager: In this
 | 
			
		||||
		// case we do get instance view every time to fulfill the azure_zones requirement without hitting
 | 
			
		||||
		// throttling.
 | 
			
		||||
		// Consider adding separate parameter for controlling 'InstanceView' once node update issue #56276 is fixed
 | 
			
		||||
		ctx, cancel := getContextWithCancel()
 | 
			
		||||
		defer cancel()
 | 
			
		||||
 | 
			
		||||
		resourceGroup, err := az.GetNodeResourceGroup(key)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return nil, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		vm, verr := az.VirtualMachinesClient.Get(ctx, resourceGroup, key, compute.InstanceView)
 | 
			
		||||
		exists, rerr := checkResourceExistsFromError(verr)
 | 
			
		||||
		if rerr != nil {
 | 
			
		||||
			return nil, rerr.Error()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if !exists {
 | 
			
		||||
			klog.V(2).Infof("Virtual machine %q not found", key)
 | 
			
		||||
			return nil, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if vm.VirtualMachineProperties != nil &&
 | 
			
		||||
			strings.EqualFold(pointer.StringDeref(vm.VirtualMachineProperties.ProvisioningState, ""), string(compute.ProvisioningStateDeleting)) {
 | 
			
		||||
			klog.V(2).Infof("Virtual machine %q is under deleting", key)
 | 
			
		||||
			return nil, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return &vm, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.VMCacheTTLInSeconds == 0 {
 | 
			
		||||
		az.VMCacheTTLInSeconds = vmCacheTTLDefaultInSeconds
 | 
			
		||||
	}
 | 
			
		||||
	return azcache.NewTimedcache(time.Duration(az.VMCacheTTLInSeconds)*time.Second, getter)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) newLBCache() (*azcache.TimedCache, error) {
 | 
			
		||||
	getter := func(key string) (interface{}, error) {
 | 
			
		||||
		ctx, cancel := getContextWithCancel()
 | 
			
		||||
		defer cancel()
 | 
			
		||||
 | 
			
		||||
		lb, err := az.LoadBalancerClient.Get(ctx, az.getLoadBalancerResourceGroup(), key, "")
 | 
			
		||||
		exists, rerr := checkResourceExistsFromError(err)
 | 
			
		||||
		if rerr != nil {
 | 
			
		||||
			return nil, rerr.Error()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if !exists {
 | 
			
		||||
			klog.V(2).Infof("Load balancer %q not found", key)
 | 
			
		||||
			return nil, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return &lb, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.LoadBalancerCacheTTLInSeconds == 0 {
 | 
			
		||||
		az.LoadBalancerCacheTTLInSeconds = loadBalancerCacheTTLDefaultInSeconds
 | 
			
		||||
	}
 | 
			
		||||
	return azcache.NewTimedcache(time.Duration(az.LoadBalancerCacheTTLInSeconds)*time.Second, getter)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) newNSGCache() (*azcache.TimedCache, error) {
 | 
			
		||||
	getter := func(key string) (interface{}, error) {
 | 
			
		||||
		ctx, cancel := getContextWithCancel()
 | 
			
		||||
		defer cancel()
 | 
			
		||||
		nsg, err := az.SecurityGroupsClient.Get(ctx, az.SecurityGroupResourceGroup, key, "")
 | 
			
		||||
		exists, rerr := checkResourceExistsFromError(err)
 | 
			
		||||
		if rerr != nil {
 | 
			
		||||
			return nil, rerr.Error()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if !exists {
 | 
			
		||||
			klog.V(2).Infof("Security group %q not found", key)
 | 
			
		||||
			return nil, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return &nsg, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.NsgCacheTTLInSeconds == 0 {
 | 
			
		||||
		az.NsgCacheTTLInSeconds = nsgCacheTTLDefaultInSeconds
 | 
			
		||||
	}
 | 
			
		||||
	return azcache.NewTimedcache(time.Duration(az.NsgCacheTTLInSeconds)*time.Second, getter)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) newRouteTableCache() (*azcache.TimedCache, error) {
 | 
			
		||||
	getter := func(key string) (interface{}, error) {
 | 
			
		||||
		ctx, cancel := getContextWithCancel()
 | 
			
		||||
		defer cancel()
 | 
			
		||||
		rt, err := az.RouteTablesClient.Get(ctx, az.RouteTableResourceGroup, key, "")
 | 
			
		||||
		exists, rerr := checkResourceExistsFromError(err)
 | 
			
		||||
		if rerr != nil {
 | 
			
		||||
			return nil, rerr.Error()
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if !exists {
 | 
			
		||||
			klog.V(2).Infof("Route table %q not found", key)
 | 
			
		||||
			return nil, nil
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return &rt, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if az.RouteTableCacheTTLInSeconds == 0 {
 | 
			
		||||
		az.RouteTableCacheTTLInSeconds = routeTableCacheTTLDefaultInSeconds
 | 
			
		||||
	}
 | 
			
		||||
	return azcache.NewTimedcache(time.Duration(az.RouteTableCacheTTLInSeconds)*time.Second, getter)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) useStandardLoadBalancer() bool {
 | 
			
		||||
	return strings.EqualFold(az.LoadBalancerSku, loadBalancerSkuStandard)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) excludeMasterNodesFromStandardLB() bool {
 | 
			
		||||
	return az.ExcludeMasterFromStandardLB != nil && *az.ExcludeMasterFromStandardLB
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (az *Cloud) disableLoadBalancerOutboundSNAT() bool {
 | 
			
		||||
	if !az.useStandardLoadBalancer() || az.DisableOutboundSNAT == nil {
 | 
			
		||||
		return false
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return *az.DisableOutboundSNAT
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// IsNodeUnmanaged returns true if the node is not managed by Azure cloud provider.
 | 
			
		||||
// Those nodes includes on-prem or VMs from other clouds. They will not be added to load balancer
 | 
			
		||||
// backends. Azure routes and managed disks are also not supported for them.
 | 
			
		||||
func (az *Cloud) IsNodeUnmanaged(nodeName string) (bool, error) {
 | 
			
		||||
	unmanagedNodes, err := az.GetUnmanagedNodes()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return false, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return unmanagedNodes.Has(nodeName), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// IsNodeUnmanagedByProviderID returns true if the node is not managed by Azure cloud provider.
 | 
			
		||||
// All managed node's providerIDs are in format 'azure:///subscriptions/<id>/resourceGroups/<rg>/providers/Microsoft.Compute/.*'
 | 
			
		||||
func (az *Cloud) IsNodeUnmanagedByProviderID(providerID string) bool {
 | 
			
		||||
	return !azureNodeProviderIDRE.Match([]byte(providerID))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// convertResourceGroupNameToLower converts the resource group name in the resource ID to be lowered.
 | 
			
		||||
func convertResourceGroupNameToLower(resourceID string) (string, error) {
 | 
			
		||||
	matches := azureResourceGroupNameRE.FindStringSubmatch(resourceID)
 | 
			
		||||
	if len(matches) != 2 {
 | 
			
		||||
		return "", fmt.Errorf("%q isn't in Azure resource ID format %q", resourceID, azureResourceGroupNameRE.String())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	resourceGroup := matches[1]
 | 
			
		||||
	return strings.Replace(resourceID, resourceGroup, strings.ToLower(resourceGroup), 1), nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isBackendPoolOnSameLB checks whether newBackendPoolID is on the same load balancer as existingBackendPools.
 | 
			
		||||
// Since both public and internal LBs are supported, lbName and lbName-internal are treated as same.
 | 
			
		||||
// If not same, the lbName for existingBackendPools would also be returned.
 | 
			
		||||
func isBackendPoolOnSameLB(newBackendPoolID string, existingBackendPools []string) (bool, string, error) {
 | 
			
		||||
	matches := backendPoolIDRE.FindStringSubmatch(newBackendPoolID)
 | 
			
		||||
	if len(matches) != 2 {
 | 
			
		||||
		return false, "", fmt.Errorf("new backendPoolID %q is in wrong format", newBackendPoolID)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	newLBName := matches[1]
 | 
			
		||||
	newLBNameTrimmed := strings.TrimSuffix(newLBName, InternalLoadBalancerNameSuffix)
 | 
			
		||||
	for _, backendPool := range existingBackendPools {
 | 
			
		||||
		matches := backendPoolIDRE.FindStringSubmatch(backendPool)
 | 
			
		||||
		if len(matches) != 2 {
 | 
			
		||||
			return false, "", fmt.Errorf("existing backendPoolID %q is in wrong format", backendPool)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		lbName := matches[1]
 | 
			
		||||
		if !strings.EqualFold(strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix), newLBNameTrimmed) {
 | 
			
		||||
			return false, lbName, nil
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return true, "", nil
 | 
			
		||||
}
 | 
			
		||||
@@ -1,292 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"reflect"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/sets"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestExtractNotFound(t *testing.T) {
 | 
			
		||||
	notFound := &retry.Error{HTTPStatusCode: http.StatusNotFound}
 | 
			
		||||
	otherHTTP := &retry.Error{HTTPStatusCode: http.StatusForbidden}
 | 
			
		||||
	otherErr := &retry.Error{HTTPStatusCode: http.StatusTooManyRequests}
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		err         *retry.Error
 | 
			
		||||
		expectedErr *retry.Error
 | 
			
		||||
		exists      bool
 | 
			
		||||
	}{
 | 
			
		||||
		{nil, nil, true},
 | 
			
		||||
		{otherErr, otherErr, false},
 | 
			
		||||
		{notFound, nil, false},
 | 
			
		||||
		{otherHTTP, otherHTTP, false},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		exists, err := checkResourceExistsFromError(test.err)
 | 
			
		||||
		if test.exists != exists {
 | 
			
		||||
			t.Errorf("expected: %v, saw: %v", test.exists, exists)
 | 
			
		||||
		}
 | 
			
		||||
		if !reflect.DeepEqual(test.expectedErr, err) {
 | 
			
		||||
			t.Errorf("expected err: %v, saw: %v", test.expectedErr, err)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestIsNodeUnmanaged(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		name           string
 | 
			
		||||
		unmanagedNodes sets.String
 | 
			
		||||
		node           string
 | 
			
		||||
		expected       bool
 | 
			
		||||
		expectErr      bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name:           "unmanaged node should return true",
 | 
			
		||||
			unmanagedNodes: sets.NewString("node1", "node2"),
 | 
			
		||||
			node:           "node1",
 | 
			
		||||
			expected:       true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "managed node should return false",
 | 
			
		||||
			unmanagedNodes: sets.NewString("node1", "node2"),
 | 
			
		||||
			node:           "node3",
 | 
			
		||||
			expected:       false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "empty unmanagedNodes should return true",
 | 
			
		||||
			unmanagedNodes: sets.NewString(),
 | 
			
		||||
			node:           "node3",
 | 
			
		||||
			expected:       false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:           "no synced informer should report error",
 | 
			
		||||
			unmanagedNodes: sets.NewString(),
 | 
			
		||||
			node:           "node1",
 | 
			
		||||
			expectErr:      true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		az.unmanagedNodes = test.unmanagedNodes
 | 
			
		||||
		if test.expectErr {
 | 
			
		||||
			az.nodeInformerSynced = func() bool {
 | 
			
		||||
				return false
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		real, err := az.IsNodeUnmanaged(test.node)
 | 
			
		||||
		if test.expectErr {
 | 
			
		||||
			assert.Error(t, err, test.name)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		assert.NoError(t, err, test.name)
 | 
			
		||||
		assert.Equal(t, test.expected, real, test.name)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestIsNodeUnmanagedByProviderID(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		providerID string
 | 
			
		||||
		expected   bool
 | 
			
		||||
		name       string
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			providerID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
 | 
			
		||||
			expected:   false,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			providerID: CloudProviderName + "://",
 | 
			
		||||
			expected:   true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			providerID: ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
 | 
			
		||||
			expected:   true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			providerID: "aws:///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
 | 
			
		||||
			expected:   true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			providerID: "k8s-agent-AAAAAAAA-0",
 | 
			
		||||
			expected:   true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		isUnmanagedNode := az.IsNodeUnmanagedByProviderID(test.providerID)
 | 
			
		||||
		assert.Equal(t, test.expected, isUnmanagedNode, test.providerID)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestConvertResourceGroupNameToLower(t *testing.T) {
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		desc        string
 | 
			
		||||
		resourceID  string
 | 
			
		||||
		expected    string
 | 
			
		||||
		expectError bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "empty string should report error",
 | 
			
		||||
			resourceID:  "",
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "resourceID not in Azure format should report error",
 | 
			
		||||
			resourceID:  "invalid-id",
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:        "providerID not in Azure format should report error",
 | 
			
		||||
			resourceID:  "azure://invalid-id",
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:       "resource group name in VM providerID should be converted",
 | 
			
		||||
			resourceID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
 | 
			
		||||
			expected:   CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:       "resource group name in VM resourceID should be converted",
 | 
			
		||||
			resourceID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
 | 
			
		||||
			expected:   "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachines/k8s-agent-AAAAAAAA-0",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:       "resource group name in VMSS providerID should be converted",
 | 
			
		||||
			resourceID: CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
 | 
			
		||||
			expected:   CloudProviderName + ":///subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			desc:       "resource group name in VMSS resourceID should be converted",
 | 
			
		||||
			resourceID: "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroupName/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
 | 
			
		||||
			expected:   "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroupname/providers/Microsoft.Compute/virtualMachineScaleSets/myScaleSetName/virtualMachines/156",
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		real, err := convertResourceGroupNameToLower(test.resourceID)
 | 
			
		||||
		if test.expectError {
 | 
			
		||||
			assert.NotNil(t, err, test.desc)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		assert.Nil(t, err, test.desc)
 | 
			
		||||
		assert.Equal(t, test.expected, real, test.desc)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestIsBackendPoolOnSameLB(t *testing.T) {
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		backendPoolID        string
 | 
			
		||||
		existingBackendPools []string
 | 
			
		||||
		expected             bool
 | 
			
		||||
		expectedLBName       string
 | 
			
		||||
		expectError          bool
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
 | 
			
		||||
			},
 | 
			
		||||
			expected:       true,
 | 
			
		||||
			expectedLBName: "",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool1",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
 | 
			
		||||
			},
 | 
			
		||||
			expected:       true,
 | 
			
		||||
			expectedLBName: "",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1-internal/backendAddressPools/pool2",
 | 
			
		||||
			},
 | 
			
		||||
			expected:       true,
 | 
			
		||||
			expectedLBName: "",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb2/backendAddressPools/pool2",
 | 
			
		||||
			},
 | 
			
		||||
			expected:       false,
 | 
			
		||||
			expectedLBName: "lb2",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "wrong-backendpool-id",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool2",
 | 
			
		||||
			},
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"wrong-existing-backendpool-id",
 | 
			
		||||
			},
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "wrong-backendpool-id",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"wrong-existing-backendpool-id",
 | 
			
		||||
			},
 | 
			
		||||
			expectError: true,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			backendPoolID: "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/malformed-lb1-internal/backendAddressPools/pool1",
 | 
			
		||||
			existingBackendPools: []string{
 | 
			
		||||
				"/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Network/loadBalancers/malformed-lb1-lanretni/backendAddressPools/pool2",
 | 
			
		||||
			},
 | 
			
		||||
			expected:       false,
 | 
			
		||||
			expectedLBName: "malformed-lb1-lanretni",
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		isSameLB, lbName, err := isBackendPoolOnSameLB(test.backendPoolID, test.existingBackendPools)
 | 
			
		||||
		if test.expectError {
 | 
			
		||||
			assert.Error(t, err)
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		assert.Equal(t, test.expected, isSameLB)
 | 
			
		||||
		assert.Equal(t, test.expectedLBName, lbName)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -1,131 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2016 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"os"
 | 
			
		||||
	"strconv"
 | 
			
		||||
	"strings"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/types"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azcache "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// makeZone returns the zone value in format of <region>-<zone-id>.
 | 
			
		||||
func (az *Cloud) makeZone(location string, zoneID int) string {
 | 
			
		||||
	return fmt.Sprintf("%s-%d", strings.ToLower(location), zoneID)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// isAvailabilityZone returns true if the zone is in format of <region>-<zone-id>.
 | 
			
		||||
func (az *Cloud) isAvailabilityZone(zone string) bool {
 | 
			
		||||
	return strings.HasPrefix(zone, fmt.Sprintf("%s-", az.Location))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetZoneID returns the ID of zone from node's zone label.
 | 
			
		||||
func (az *Cloud) GetZoneID(zoneLabel string) string {
 | 
			
		||||
	if !az.isAvailabilityZone(zoneLabel) {
 | 
			
		||||
		return ""
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return strings.TrimPrefix(zoneLabel, fmt.Sprintf("%s-", az.Location))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetZone returns the Zone containing the current availability zone and locality region that the program is running in.
 | 
			
		||||
// If the node is not running with availability zones, then it will fall back to fault domain.
 | 
			
		||||
func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) {
 | 
			
		||||
	if az.UseInstanceMetadata {
 | 
			
		||||
		metadata, err := az.metadata.GetMetadata(azcache.CacheReadTypeUnsafe)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			return cloudprovider.Zone{}, err
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if metadata.Compute == nil {
 | 
			
		||||
			az.metadata.imsCache.Delete(metadataCacheKey)
 | 
			
		||||
			return cloudprovider.Zone{}, fmt.Errorf("failure of getting compute information from instance metadata")
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		zone := ""
 | 
			
		||||
		location := metadata.Compute.Location
 | 
			
		||||
		if metadata.Compute.Zone != "" {
 | 
			
		||||
			zoneID, err := strconv.Atoi(metadata.Compute.Zone)
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				return cloudprovider.Zone{}, fmt.Errorf("failed to parse zone ID %q: %v", metadata.Compute.Zone, err)
 | 
			
		||||
			}
 | 
			
		||||
			zone = az.makeZone(location, zoneID)
 | 
			
		||||
		} else {
 | 
			
		||||
			klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain")
 | 
			
		||||
			zone = metadata.Compute.FaultDomain
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return cloudprovider.Zone{
 | 
			
		||||
			FailureDomain: strings.ToLower(zone),
 | 
			
		||||
			Region:        strings.ToLower(location),
 | 
			
		||||
		}, nil
 | 
			
		||||
	}
 | 
			
		||||
	// if UseInstanceMetadata is false, get Zone name by calling ARM
 | 
			
		||||
	hostname, err := os.Hostname()
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel")
 | 
			
		||||
	}
 | 
			
		||||
	return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetZoneByProviderID implements Zones.GetZoneByProviderID
 | 
			
		||||
// This is particularly useful in external cloud providers where the kubelet
 | 
			
		||||
// does not initialize node data.
 | 
			
		||||
func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) {
 | 
			
		||||
	if providerID == "" {
 | 
			
		||||
		return cloudprovider.Zone{}, errNodeNotInitialized
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them.
 | 
			
		||||
	if az.IsNodeUnmanagedByProviderID(providerID) {
 | 
			
		||||
		klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID)
 | 
			
		||||
		return cloudprovider.Zone{}, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return cloudprovider.Zone{}, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az.GetZoneByNodeName(ctx, nodeName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetZoneByNodeName implements Zones.GetZoneByNodeName
 | 
			
		||||
// This is particularly useful in external cloud providers where the kubelet
 | 
			
		||||
// does not initialize node data.
 | 
			
		||||
func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
 | 
			
		||||
	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
 | 
			
		||||
	unmanaged, err := az.IsNodeUnmanaged(string(nodeName))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return cloudprovider.Zone{}, err
 | 
			
		||||
	}
 | 
			
		||||
	if unmanaged {
 | 
			
		||||
		klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName)
 | 
			
		||||
		return cloudprovider.Zone{}, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return az.VMSet.GetZoneByNodeName(string(nodeName))
 | 
			
		||||
}
 | 
			
		||||
@@ -1,245 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2018 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package azure
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/apimachinery/pkg/util/sets"
 | 
			
		||||
	cloudprovider "k8s.io/cloud-provider"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/vmclient/mockvmclient"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	testAvailabilitySetNodeProviderID = "azure:///subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm-0"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestIsAvailabilityZone(t *testing.T) {
 | 
			
		||||
	location := "eastus"
 | 
			
		||||
	az := &Cloud{
 | 
			
		||||
		Config: Config{
 | 
			
		||||
			Location: location,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		desc     string
 | 
			
		||||
		zone     string
 | 
			
		||||
		expected bool
 | 
			
		||||
	}{
 | 
			
		||||
		{"empty string should return false", "", false},
 | 
			
		||||
		{"wrong format should return false", "123", false},
 | 
			
		||||
		{"wrong location should return false", "chinanorth-1", false},
 | 
			
		||||
		{"correct zone should return true", "eastus-1", true},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		actual := az.isAvailabilityZone(test.zone)
 | 
			
		||||
		if actual != test.expected {
 | 
			
		||||
			t.Errorf("test [%q] get unexpected result: %v != %v", test.desc, actual, test.expected)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetZoneID(t *testing.T) {
 | 
			
		||||
	location := "eastus"
 | 
			
		||||
	az := &Cloud{
 | 
			
		||||
		Config: Config{
 | 
			
		||||
			Location: location,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		desc     string
 | 
			
		||||
		zone     string
 | 
			
		||||
		expected string
 | 
			
		||||
	}{
 | 
			
		||||
		{"empty string should return empty string", "", ""},
 | 
			
		||||
		{"wrong format should return empty string", "123", ""},
 | 
			
		||||
		{"wrong location should return empty string", "chinanorth-1", ""},
 | 
			
		||||
		{"correct zone should return zone ID", "eastus-1", "1"},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		actual := az.GetZoneID(test.zone)
 | 
			
		||||
		if actual != test.expected {
 | 
			
		||||
			t.Errorf("test [%q] get unexpected result: %q != %q", test.desc, actual, test.expected)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetZone(t *testing.T) {
 | 
			
		||||
	cloud := &Cloud{
 | 
			
		||||
		Config: Config{
 | 
			
		||||
			Location:            "eastus",
 | 
			
		||||
			UseInstanceMetadata: true,
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	testcases := []struct {
 | 
			
		||||
		name        string
 | 
			
		||||
		zone        string
 | 
			
		||||
		location    string
 | 
			
		||||
		faultDomain string
 | 
			
		||||
		expected    string
 | 
			
		||||
		isNilResp   bool
 | 
			
		||||
		expectedErr error
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			name:     "GetZone should get real zone if only node's zone is set",
 | 
			
		||||
			zone:     "1",
 | 
			
		||||
			location: "eastus",
 | 
			
		||||
			expected: "eastus-1",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:        "GetZone should get real zone if both node's zone and FD are set",
 | 
			
		||||
			zone:        "1",
 | 
			
		||||
			location:    "eastus",
 | 
			
		||||
			faultDomain: "99",
 | 
			
		||||
			expected:    "eastus-1",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:        "GetZone should get faultDomain if node's zone isn't set",
 | 
			
		||||
			location:    "eastus",
 | 
			
		||||
			faultDomain: "99",
 | 
			
		||||
			expected:    "99",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:     "GetZone should get availability zone in lower cases",
 | 
			
		||||
			location: "EastUS",
 | 
			
		||||
			zone:     "1",
 | 
			
		||||
			expected: "eastus-1",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:        "GetZone should report an error if there is no `Compute` in the response",
 | 
			
		||||
			isNilResp:   true,
 | 
			
		||||
			expectedErr: fmt.Errorf("failure of getting compute information from instance metadata"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			name:        "GetZone should report an error if the zone is invalid",
 | 
			
		||||
			zone:        "a",
 | 
			
		||||
			location:    "eastus",
 | 
			
		||||
			faultDomain: "99",
 | 
			
		||||
			expected:    "",
 | 
			
		||||
			expectedErr: fmt.Errorf("failed to parse zone ID \"a\": strconv.Atoi: parsing \"a\": invalid syntax"),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range testcases {
 | 
			
		||||
		listener, err := net.Listen("tcp", "127.0.0.1:0")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		respString := fmt.Sprintf(`{"compute":{"zone":"%s", "platformFaultDomain":"%s", "location":"%s"}}`, test.zone, test.faultDomain, test.location)
 | 
			
		||||
		if test.isNilResp {
 | 
			
		||||
			respString = "{}"
 | 
			
		||||
		}
 | 
			
		||||
		mux := http.NewServeMux()
 | 
			
		||||
		mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
			fmt.Fprint(w, respString)
 | 
			
		||||
		}))
 | 
			
		||||
		go func() {
 | 
			
		||||
			http.Serve(listener, mux)
 | 
			
		||||
		}()
 | 
			
		||||
		defer listener.Close()
 | 
			
		||||
 | 
			
		||||
		cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/")
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		zone, err := cloud.GetZone(context.Background())
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			if test.expectedErr == nil {
 | 
			
		||||
				t.Errorf("Test [%s] unexpected error: %v", test.name, err)
 | 
			
		||||
			} else {
 | 
			
		||||
				assert.Equal(t, test.expectedErr, err)
 | 
			
		||||
			}
 | 
			
		||||
		}
 | 
			
		||||
		if zone.FailureDomain != test.expected {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected zone: %s, expected %q", test.name, zone.FailureDomain, test.expected)
 | 
			
		||||
		}
 | 
			
		||||
		if err == nil && zone.Region != cloud.Location {
 | 
			
		||||
			t.Errorf("Test [%s] unexpected region: %s, expected: %s", test.name, zone.Region, cloud.Location)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestMakeZone(t *testing.T) {
 | 
			
		||||
	az := &Cloud{}
 | 
			
		||||
	zone := az.makeZone("EASTUS", 2)
 | 
			
		||||
	assert.Equal(t, "eastus-2", zone)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetZoneByProviderID(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	az := GetTestCloud(ctrl)
 | 
			
		||||
 | 
			
		||||
	zone, err := az.GetZoneByProviderID(context.Background(), "")
 | 
			
		||||
	assert.Equal(t, errNodeNotInitialized, err)
 | 
			
		||||
	assert.Equal(t, cloudprovider.Zone{}, zone)
 | 
			
		||||
 | 
			
		||||
	zone, err = az.GetZoneByProviderID(context.Background(), "invalid/id")
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, cloudprovider.Zone{}, zone)
 | 
			
		||||
 | 
			
		||||
	mockVMClient := az.VirtualMachinesClient.(*mockvmclient.MockInterface)
 | 
			
		||||
	mockVMClient.EXPECT().Get(gomock.Any(), az.ResourceGroup, "vm-0", gomock.Any()).Return(compute.VirtualMachine{
 | 
			
		||||
		Zones:    &[]string{"1"},
 | 
			
		||||
		Location: pointer.String("eastus"),
 | 
			
		||||
	}, nil)
 | 
			
		||||
	zone, err = az.GetZoneByProviderID(context.Background(), testAvailabilitySetNodeProviderID)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, cloudprovider.Zone{
 | 
			
		||||
		FailureDomain: "eastus-1",
 | 
			
		||||
		Region:        "eastus",
 | 
			
		||||
	}, zone)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestAvailabilitySetGetZoneByNodeName(t *testing.T) {
 | 
			
		||||
	az := &Cloud{
 | 
			
		||||
		unmanagedNodes: sets.String{"vm-0": sets.Empty{}},
 | 
			
		||||
		nodeInformerSynced: func() bool {
 | 
			
		||||
			return true
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	zone, err := az.GetZoneByNodeName(context.Background(), "vm-0")
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, cloudprovider.Zone{}, zone)
 | 
			
		||||
 | 
			
		||||
	az = &Cloud{
 | 
			
		||||
		unmanagedNodes: sets.String{"vm-0": sets.Empty{}},
 | 
			
		||||
		nodeInformerSynced: func() bool {
 | 
			
		||||
			return false
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
	zone, err = az.GetZoneByNodeName(context.Background(), "vm-0")
 | 
			
		||||
	assert.Equal(t, fmt.Errorf("node informer is not synced when trying to GetUnmanagedNodes"), err)
 | 
			
		||||
	assert.Equal(t, cloudprovider.Zone{}, zone)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,178 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package cache
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/tools/cache"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// AzureCacheReadType defines the read type for cache data
 | 
			
		||||
type AzureCacheReadType int
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	// CacheReadTypeDefault returns data from cache if cache entry not expired
 | 
			
		||||
	// if cache entry expired, then it will refetch the data using getter
 | 
			
		||||
	// save the entry in cache and then return
 | 
			
		||||
	CacheReadTypeDefault AzureCacheReadType = iota
 | 
			
		||||
	// CacheReadTypeUnsafe returns data from cache even if the cache entry is
 | 
			
		||||
	// active/expired. If entry doesn't exist in cache, then data is fetched
 | 
			
		||||
	// using getter, saved in cache and returned
 | 
			
		||||
	CacheReadTypeUnsafe
 | 
			
		||||
	// CacheReadTypeForceRefresh force refreshes the cache even if the cache entry
 | 
			
		||||
	// is not expired
 | 
			
		||||
	CacheReadTypeForceRefresh
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// GetFunc defines a getter function for timedCache.
 | 
			
		||||
type GetFunc func(key string) (interface{}, error)
 | 
			
		||||
 | 
			
		||||
// AzureCacheEntry is the internal structure stores inside TTLStore.
 | 
			
		||||
type AzureCacheEntry struct {
 | 
			
		||||
	Key  string
 | 
			
		||||
	Data interface{}
 | 
			
		||||
 | 
			
		||||
	// The lock to ensure not updating same entry simultaneously.
 | 
			
		||||
	Lock sync.Mutex
 | 
			
		||||
	// time when entry was fetched and created
 | 
			
		||||
	CreatedOn time.Time
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// cacheKeyFunc defines the key function required in TTLStore.
 | 
			
		||||
func cacheKeyFunc(obj interface{}) (string, error) {
 | 
			
		||||
	return obj.(*AzureCacheEntry).Key, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TimedCache is a cache with TTL.
 | 
			
		||||
type TimedCache struct {
 | 
			
		||||
	Store  cache.Store
 | 
			
		||||
	Lock   sync.Mutex
 | 
			
		||||
	Getter GetFunc
 | 
			
		||||
	TTL    time.Duration
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewTimedcache creates a new TimedCache.
 | 
			
		||||
func NewTimedcache(ttl time.Duration, getter GetFunc) (*TimedCache, error) {
 | 
			
		||||
	if getter == nil {
 | 
			
		||||
		return nil, fmt.Errorf("getter is not provided")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return &TimedCache{
 | 
			
		||||
		Getter: getter,
 | 
			
		||||
		// switch to using NewStore instead of NewTTLStore so that we can
 | 
			
		||||
		// reuse entries for calls that are fine with reading expired/stalled data.
 | 
			
		||||
		// with NewTTLStore, entries are not returned if they have already expired.
 | 
			
		||||
		Store: cache.NewStore(cacheKeyFunc),
 | 
			
		||||
		TTL:   ttl,
 | 
			
		||||
	}, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getInternal returns AzureCacheEntry by key. If the key is not cached yet,
 | 
			
		||||
// it returns a AzureCacheEntry with nil data.
 | 
			
		||||
func (t *TimedCache) getInternal(key string) (*AzureCacheEntry, error) {
 | 
			
		||||
	entry, exists, err := t.Store.GetByKey(key)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	// if entry exists, return the entry
 | 
			
		||||
	if exists {
 | 
			
		||||
		return entry.(*AzureCacheEntry), nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// lock here to ensure if entry doesn't exist, we add a new entry
 | 
			
		||||
	// avoiding overwrites
 | 
			
		||||
	t.Lock.Lock()
 | 
			
		||||
	defer t.Lock.Unlock()
 | 
			
		||||
 | 
			
		||||
	// Another goroutine might have written the same key.
 | 
			
		||||
	entry, exists, err = t.Store.GetByKey(key)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
	if exists {
 | 
			
		||||
		return entry.(*AzureCacheEntry), nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Still not found, add new entry with nil data.
 | 
			
		||||
	// Note the data will be filled later by getter.
 | 
			
		||||
	newEntry := &AzureCacheEntry{
 | 
			
		||||
		Key:  key,
 | 
			
		||||
		Data: nil,
 | 
			
		||||
	}
 | 
			
		||||
	t.Store.Add(newEntry)
 | 
			
		||||
	return newEntry, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get returns the requested item by key, applying the read strategy crt:
//   - CacheReadTypeDefault: serve cached data only while it is within TTL.
//   - CacheReadTypeUnsafe:  serve cached data even if expired.
//   - CacheReadTypeForceRefresh: always refetch via the getter.
// On a refetch, the getter result is stored back with a fresh timestamp.
func (t *TimedCache) Get(key string, crt AzureCacheReadType) (interface{}, error) {
	entry, err := t.getInternal(key)
	if err != nil {
		return nil, err
	}

	// Per-entry lock: serializes refreshes of this key so that concurrent
	// Gets do not each invoke the getter.
	entry.Lock.Lock()
	defer entry.Lock.Unlock()

	// entry exists and if cache is not force refreshed
	if entry.Data != nil && crt != CacheReadTypeForceRefresh {
		// allow unsafe read, so return data even if expired
		if crt == CacheReadTypeUnsafe {
			return entry.Data, nil
		}
		// if cached data is not expired, return cached data
		if crt == CacheReadTypeDefault && time.Since(entry.CreatedOn) < t.TTL {
			return entry.Data, nil
		}
	}
	// Data is not cached yet, cache data is expired or requested force refresh
	// cache it by getter. entry is locked before getting to ensure concurrent
	// gets don't result in multiple ARM calls.
	data, err := t.Getter(key)
	if err != nil {
		return nil, err
	}

	// set the data in cache and also set the last update time
	// to now as the data was recently fetched
	entry.Data = data
	entry.CreatedOn = time.Now().UTC()

	return entry.Data, nil
}
 | 
			
		||||
 | 
			
		||||
// Delete removes an item from the cache.
 | 
			
		||||
func (t *TimedCache) Delete(key string) error {
 | 
			
		||||
	return t.Store.Delete(&AzureCacheEntry{
 | 
			
		||||
		Key: key,
 | 
			
		||||
	})
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Set sets the data cache for the key.
 | 
			
		||||
// It is only used for testing.
 | 
			
		||||
func (t *TimedCache) Set(key string, data interface{}) {
 | 
			
		||||
	t.Store.Add(&AzureCacheEntry{
 | 
			
		||||
		Key:       key,
 | 
			
		||||
		Data:      data,
 | 
			
		||||
		CreatedOn: time.Now().UTC(),
 | 
			
		||||
	})
 | 
			
		||||
}
 | 
			
		||||
@@ -1,229 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2017 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package cache
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var (
	// fakeCacheTTL is the TTL applied to caches created in these tests;
	// expiry tests sleep for this duration to push entries past their TTL.
	fakeCacheTTL = 2 * time.Second
)
 | 
			
		||||
 | 
			
		||||
// fakeDataObj is an opaque placeholder value stored in the fake data source.
type fakeDataObj struct{}
 | 
			
		||||
 | 
			
		||||
// fakeDataSource is an in-memory GetFunc backend that records how many
// times its getter was invoked, allowing tests to assert on cache hits.
type fakeDataSource struct {
	// called counts get() invocations since the last set().
	called int
	// data is the backing map returned by get().
	data   map[string]*fakeDataObj
	// lock guards called and data.
	lock   sync.Mutex
}
 | 
			
		||||
 | 
			
		||||
func (fake *fakeDataSource) get(key string) (interface{}, error) {
 | 
			
		||||
	fake.lock.Lock()
 | 
			
		||||
	defer fake.lock.Unlock()
 | 
			
		||||
 | 
			
		||||
	fake.called = fake.called + 1
 | 
			
		||||
	if v, ok := fake.data[key]; ok {
 | 
			
		||||
		return v, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (fake *fakeDataSource) set(data map[string]*fakeDataObj) {
 | 
			
		||||
	fake.lock.Lock()
 | 
			
		||||
	defer fake.lock.Unlock()
 | 
			
		||||
 | 
			
		||||
	fake.data = data
 | 
			
		||||
	fake.called = 0
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func newFakeCache(t *testing.T) (*fakeDataSource, *TimedCache) {
 | 
			
		||||
	dataSource := &fakeDataSource{
 | 
			
		||||
		data: make(map[string]*fakeDataObj),
 | 
			
		||||
	}
 | 
			
		||||
	getter := dataSource.get
 | 
			
		||||
	cache, err := NewTimedcache(fakeCacheTTL, getter)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	return dataSource, cache
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestCacheGet verifies TimedCache.Get with CacheReadTypeDefault against an
// empty data source, a missing key, and an existing key.
func TestCacheGet(t *testing.T) {
	val := &fakeDataObj{}
	cases := []struct {
		name     string
		data     map[string]*fakeDataObj
		key      string
		expected interface{}
	}{
		{
			name:     "cache should return nil for empty data source",
			key:      "key1",
			expected: nil,
		},
		{
			name:     "cache should return nil for non exist key",
			data:     map[string]*fakeDataObj{"key2": val},
			key:      "key1",
			expected: nil,
		},
		{
			name:     "cache should return data for existing key",
			data:     map[string]*fakeDataObj{"key1": val},
			key:      "key1",
			expected: val,
		},
	}

	// Each case gets a fresh cache so earlier lookups cannot populate it.
	for _, c := range cases {
		dataSource, cache := newFakeCache(t)
		dataSource.set(c.data)
		val, err := cache.Get(c.key, CacheReadTypeDefault)
		assert.NoError(t, err, c.name)
		assert.Equal(t, c.expected, val, c.name)
	}
}
 | 
			
		||||
 | 
			
		||||
func TestCacheGetError(t *testing.T) {
 | 
			
		||||
	getError := fmt.Errorf("getError")
 | 
			
		||||
	getter := func(key string) (interface{}, error) {
 | 
			
		||||
		return nil, getError
 | 
			
		||||
	}
 | 
			
		||||
	cache, err := NewTimedcache(fakeCacheTTL, getter)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
 | 
			
		||||
	val, err := cache.Get("key", CacheReadTypeDefault)
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.Equal(t, getError, err)
 | 
			
		||||
	assert.Nil(t, val)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCacheDelete(t *testing.T) {
 | 
			
		||||
	key := "key1"
 | 
			
		||||
	val := &fakeDataObj{}
 | 
			
		||||
	data := map[string]*fakeDataObj{
 | 
			
		||||
		key: val,
 | 
			
		||||
	}
 | 
			
		||||
	dataSource, cache := newFakeCache(t)
 | 
			
		||||
	dataSource.set(data)
 | 
			
		||||
 | 
			
		||||
	v, err := cache.Get(key, CacheReadTypeDefault)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, val, v, "cache should get correct data")
 | 
			
		||||
 | 
			
		||||
	dataSource.set(nil)
 | 
			
		||||
	cache.Delete(key)
 | 
			
		||||
	v, err = cache.Get(key, CacheReadTypeDefault)
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	assert.Equal(t, 1, dataSource.called)
 | 
			
		||||
	assert.Equal(t, nil, v, "cache should get nil after data is removed")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestCacheExpired verifies that a default read after the TTL has elapsed
// triggers a refetch from the data source (entry is kept but refreshed).
func TestCacheExpired(t *testing.T) {
	key := "key1"
	val := &fakeDataObj{}
	data := map[string]*fakeDataObj{
		key: val,
	}
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	v, err := cache.Get(key, CacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data")

	// Sleep past the TTL so the next default read must refetch.
	time.Sleep(fakeCacheTTL)
	v, err = cache.Get(key, CacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 2, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data even after expired")
}
 | 
			
		||||
 | 
			
		||||
// TestCacheAllowUnsafeRead verifies that CacheReadTypeUnsafe serves stale
// data after the TTL has elapsed without calling the getter again.
func TestCacheAllowUnsafeRead(t *testing.T) {
	key := "key1"
	val := &fakeDataObj{}
	data := map[string]*fakeDataObj{
		key: val,
	}
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	v, err := cache.Get(key, CacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data")

	// Expire the entry, then read unsafely: the stale value must be served
	// and the getter must not be invoked again (called stays at 1).
	time.Sleep(fakeCacheTTL)
	v, err = cache.Get(key, CacheReadTypeUnsafe)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should return expired as allow unsafe read is allowed")
}
 | 
			
		||||
 | 
			
		||||
// TestCacheNoConcurrentGet verifies that concurrent Gets on the same expired
// key collapse into a single getter call (the per-entry lock in Get ensures
// only one goroutine refetches while the rest reuse its result).
func TestCacheNoConcurrentGet(t *testing.T) {
	key := "key1"
	val := &fakeDataObj{}
	data := map[string]*fakeDataObj{
		key: val,
	}
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	// Ensure the (empty) cache entry would be expired before the reads race.
	time.Sleep(fakeCacheTTL)
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, _ = cache.Get(key, CacheReadTypeDefault)
		}()
	}
	v, err := cache.Get(key, CacheReadTypeDefault)
	wg.Wait()
	assert.NoError(t, err)
	// All six Gets (5 goroutines + this one) must share one getter call.
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data")
}
 | 
			
		||||
 | 
			
		||||
// TestCacheForceRefresh verifies that CacheReadTypeForceRefresh refetches
// via the getter even though the cached entry has not expired.
func TestCacheForceRefresh(t *testing.T) {
	key := "key1"
	val := &fakeDataObj{}
	data := map[string]*fakeDataObj{
		key: val,
	}
	dataSource, cache := newFakeCache(t)
	dataSource.set(data)

	v, err := cache.Get(key, CacheReadTypeDefault)
	assert.NoError(t, err)
	assert.Equal(t, 1, dataSource.called)
	assert.Equal(t, val, v, "cache should get correct data")

	// Entry is still fresh, yet a forced refresh must hit the source again.
	v, err = cache.Get(key, CacheReadTypeForceRefresh)
	assert.NoError(t, err)
	assert.Equal(t, 2, dataSource.called)
	assert.Equal(t, val, v, "should refetch unexpired data as forced refresh")
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package cache is an implementation of Azure caches.
 | 
			
		||||
package cache // import "k8s.io/legacy-cloud-providers/azure/cache"
 | 
			
		||||
@@ -1,701 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package armclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"context"
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"sync"
 | 
			
		||||
	"time"
 | 
			
		||||
	"unicode"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/pkg/version"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Compile-time check that Client satisfies the armclient Interface.
var _ Interface = &Client{}
 | 
			
		||||
 | 
			
		||||
// Client implements ARM client Interface.
type Client struct {
	// client is the underlying autorest client used for all HTTP calls.
	client  autorest.Client
	// backoff controls the retry policy applied by sendRequest.
	backoff *retry.Backoff

	// baseURI is the ARM endpoint all prepared requests are rooted at.
	baseURI      string
	apiVersion   string
	// clientRegion is the normalized Azure region used for the regional
	// ARM-endpoint retry in Send; empty disables that retry.
	clientRegion string
}
 | 
			
		||||
 | 
			
		||||
// New creates a ARM client with the given authorizer, endpoint, user agent,
// API version, region and backoff policy. A nil backoff (or zero Steps)
// results in a single attempt with no retry.
func New(authorizer autorest.Authorizer, baseURI, userAgent, apiVersion, clientRegion string, clientBackoff *retry.Backoff) *Client {
	restClient := autorest.NewClientWithUserAgent(userAgent)
	restClient.PollingDelay = 5 * time.Second
	restClient.RetryAttempts = 3
	restClient.RetryDuration = time.Second * 1
	restClient.Authorizer = authorizer

	// Fall back to the kubernetes-cloudprovider user agent when none given.
	if userAgent == "" {
		restClient.UserAgent = GetUserAgent(restClient)
	}

	backoff := clientBackoff
	if backoff == nil {
		backoff = &retry.Backoff{}
	}
	if backoff.Steps == 0 {
		// Steps == 1 means no retry.
		backoff.Steps = 1
	}

	return &Client{
		client:       restClient,
		baseURI:      baseURI,
		backoff:      backoff,
		apiVersion:   apiVersion,
		clientRegion: NormalizeAzureRegion(clientRegion),
	}
}
 | 
			
		||||
 | 
			
		||||
// GetUserAgent gets the autorest client with a user agent that
 | 
			
		||||
// includes "kubernetes" and the full kubernetes git version string
 | 
			
		||||
// example:
 | 
			
		||||
// Azure-SDK-for-Go/7.0.1 arm-network/2016-09-01; kubernetes-cloudprovider/v1.17.0;
 | 
			
		||||
func GetUserAgent(client autorest.Client) string {
 | 
			
		||||
	k8sVersion := version.Get().GitVersion
 | 
			
		||||
	return fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NormalizeAzureRegion returns a normalized Azure region with white spaces
// removed and converted to lower case, e.g. "East US 2" -> "eastus2".
// The previous implementation built the result with string concatenation in
// a loop (quadratic); strings.Map performs the same filtering in one pass
// with a single allocation.
func NormalizeAzureRegion(name string) string {
	stripped := strings.Map(func(r rune) rune {
		if unicode.IsSpace(r) {
			return -1 // drop whitespace runes
		}
		return r
	}, name)
	return strings.ToLower(stripped)
}
 | 
			
		||||
 | 
			
		||||
// sendRequest sends a http request to ARM service.
// Although Azure SDK supports retries per https://github.com/azure/azure-sdk-for-go#request-retry-policy, we
// disable it since we want to fully control the retry policies.
func (c *Client) sendRequest(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) {
	// Copy the backoff so retries on this request do not mutate the
	// client-wide policy shared by concurrent calls.
	sendBackoff := *c.backoff
	response, err := autorest.SendWithSender(
		c.client,
		request,
		retry.DoExponentialBackoffRetry(&sendBackoff),
	)

	// Treat "no response, no error" as a failure rather than returning nils.
	if response == nil && err == nil {
		return response, retry.NewError(false, fmt.Errorf("Empty response and no HTTP code"))
	}

	return response, retry.GetError(response, err)
}
 | 
			
		||||
 | 
			
		||||
// Send sends a http request to ARM service with possible retry to regional ARM endpoint.
// The regional retry only triggers when all of the following hold: the
// global request returned 404, a client region is configured, the body
// contains a ResourceGroupNotFound error code, and the current host is not
// already the regional host. The regional result is used only if it is 2xx.
func (c *Client) Send(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) {
	response, rerr := c.sendRequest(ctx, request)
	if rerr != nil {
		return response, rerr
	}

	if response.StatusCode != http.StatusNotFound || c.clientRegion == "" {
		return response, rerr
	}

	// Drain the body for inspection, then restore it so the caller can
	// still read the original response.
	bodyBytes, _ := ioutil.ReadAll(response.Body)
	defer func() {
		response.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes))
	}()

	bodyString := string(bodyBytes)
	klog.V(5).Infof("Send.sendRequest original error message: %s", bodyString)

	// Hack: retry the regional ARM endpoint in case of ARM traffic split and arm resource group replication is too slow
	var body map[string]interface{}
	if e := json.Unmarshal(bodyBytes, &body); e != nil {
		klog.V(5).Infof("Send.sendRequest: error in parsing response body string: %s, Skip retrying regional host", e)
		return response, rerr
	}

	if err, ok := body["error"].(map[string]interface{}); !ok ||
		err["code"] == nil ||
		!strings.EqualFold(err["code"].(string), "ResourceGroupNotFound") {
		klog.V(5).Infof("Send.sendRequest: response body does not contain ResourceGroupNotFound error code. Skip retrying regional host")
		return response, rerr
	}

	// Prefer the explicit Host header over the URL host when set.
	currentHost := request.URL.Host
	if request.Host != "" {
		currentHost = request.Host
	}

	if strings.HasPrefix(strings.ToLower(currentHost), c.clientRegion) {
		klog.V(5).Infof("Send.sendRequest: current host %s is regional host. Skip retrying regional host.", currentHost)
		return response, rerr
	}

	request.Host = fmt.Sprintf("%s.%s", c.clientRegion, strings.ToLower(currentHost))
	klog.V(5).Infof("Send.sendRegionalRequest on ResourceGroupNotFound error. Retrying regional host: %s", request.Host)
	regionalResponse, regionalError := c.sendRequest(ctx, request)

	// only use the result if the regional request actually goes through and returns 2xx status code, for two reasons:
	// 1. the retry on regional ARM host approach is a hack.
	// 2. the concatenated regional uri could be wrong as the rule is not officially declared by ARM.
	if regionalResponse == nil || regionalResponse.StatusCode > 299 {
		regionalErrStr := ""
		if regionalError != nil {
			regionalErrStr = regionalError.Error().Error()
		}

		klog.V(5).Infof("Send.sendRegionalRequest failed to get response from regional host, error: '%s'. Ignoring the result.", regionalErrStr)
		return response, rerr
	}

	return regionalResponse, regionalError
}
 | 
			
		||||
 | 
			
		||||
// PreparePutRequest prepares put request
 | 
			
		||||
func (c *Client) PreparePutRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	decorators = append(
 | 
			
		||||
		[]autorest.PrepareDecorator{
 | 
			
		||||
			autorest.AsContentType("application/json; charset=utf-8"),
 | 
			
		||||
			autorest.AsPut(),
 | 
			
		||||
			autorest.WithBaseURL(c.baseURI)},
 | 
			
		||||
		decorators...)
 | 
			
		||||
	return c.prepareRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PreparePatchRequest prepares patch request
 | 
			
		||||
func (c *Client) PreparePatchRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	decorators = append(
 | 
			
		||||
		[]autorest.PrepareDecorator{
 | 
			
		||||
			autorest.AsContentType("application/json; charset=utf-8"),
 | 
			
		||||
			autorest.AsPatch(),
 | 
			
		||||
			autorest.WithBaseURL(c.baseURI)},
 | 
			
		||||
		decorators...)
 | 
			
		||||
	return c.prepareRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PreparePostRequest prepares post request
 | 
			
		||||
func (c *Client) PreparePostRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	decorators = append(
 | 
			
		||||
		[]autorest.PrepareDecorator{
 | 
			
		||||
			autorest.AsContentType("application/json; charset=utf-8"),
 | 
			
		||||
			autorest.AsPost(),
 | 
			
		||||
			autorest.WithBaseURL(c.baseURI)},
 | 
			
		||||
		decorators...)
 | 
			
		||||
	return c.prepareRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PrepareGetRequest prepares get request
 | 
			
		||||
func (c *Client) PrepareGetRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	decorators = append(
 | 
			
		||||
		[]autorest.PrepareDecorator{
 | 
			
		||||
			autorest.AsGet(),
 | 
			
		||||
			autorest.WithBaseURL(c.baseURI)},
 | 
			
		||||
		decorators...)
 | 
			
		||||
	return c.prepareRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PrepareDeleteRequest preparse delete request
 | 
			
		||||
func (c *Client) PrepareDeleteRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	decorators = append(
 | 
			
		||||
		[]autorest.PrepareDecorator{
 | 
			
		||||
			autorest.AsDelete(),
 | 
			
		||||
			autorest.WithBaseURL(c.baseURI)},
 | 
			
		||||
		decorators...)
 | 
			
		||||
	return c.prepareRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PrepareHeadRequest prepares head request
 | 
			
		||||
func (c *Client) PrepareHeadRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	decorators = append(
 | 
			
		||||
		[]autorest.PrepareDecorator{
 | 
			
		||||
			autorest.AsHead(),
 | 
			
		||||
			autorest.WithBaseURL(c.baseURI)},
 | 
			
		||||
		decorators...)
 | 
			
		||||
	return c.prepareRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// WaitForAsyncOperationCompletion waits for an operation completion.
// It blocks until the future finishes (or ctx is cancelled), then verifies
// the operation actually reached a done state; a future that completed the
// wait without being done yields an AsyncOpIncompleteError.
func (c *Client) WaitForAsyncOperationCompletion(ctx context.Context, future *azure.Future, asyncOperationName string) error {
	err := future.WaitForCompletionRef(ctx, c.client)
	if err != nil {
		klog.V(5).Infof("Received error in WaitForCompletionRef: '%v'", err)
		return err
	}

	var done bool
	done, err = future.DoneWithContext(ctx, c.client)
	if err != nil {
		klog.V(5).Infof("Received error in DoneWithContext: '%v'", err)
		return autorest.NewErrorWithError(err, asyncOperationName, "Result", future.Response(), "Polling failure")
	}
	if !done {
		return azure.NewAsyncOpIncompleteError(asyncOperationName)
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// WaitForAsyncOperationResult waits for an operation result.
 | 
			
		||||
func (c *Client) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, asyncOperationName string) (*http.Response, error) {
 | 
			
		||||
	err := c.WaitForAsyncOperationCompletion(ctx, future, asyncOperationName)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in WaitForAsyncOperationCompletion: '%v'", err)
 | 
			
		||||
		return nil, err
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	sendBackoff := *c.backoff
 | 
			
		||||
	sender := autorest.DecorateSender(
 | 
			
		||||
		c.client,
 | 
			
		||||
		retry.DoExponentialBackoffRetry(&sendBackoff),
 | 
			
		||||
	)
 | 
			
		||||
	return future.GetResult(sender)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// SendAsync send a request and return a future object representing the async result as well as the origin http response
func (c *Client) SendAsync(ctx context.Context, request *http.Request) (*azure.Future, *http.Response, *retry.Error) {
	asyncResponse, rerr := c.Send(ctx, request)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "sendAsync.send", request.URL.String(), rerr.Error())
		return nil, nil, rerr
	}

	// Wrap the accepted response into a Future the caller can poll; on
	// failure the original response is still returned for inspection.
	future, err := azure.NewFutureFromResponse(asyncResponse)
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "sendAsync.respond", request.URL.String(), err)
		return nil, asyncResponse, retry.GetError(asyncResponse, err)
	}

	return &future, asyncResponse, nil
}
 | 
			
		||||
 | 
			
		||||
// GetResource get a resource by resource ID
 | 
			
		||||
func (c *Client) GetResource(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error) {
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
	}
 | 
			
		||||
	if expand != "" {
 | 
			
		||||
		queryParameters := map[string]interface{}{
 | 
			
		||||
			"$expand": autorest.Encode("query", expand),
 | 
			
		||||
		}
 | 
			
		||||
		decorators = append(decorators, autorest.WithQueryParameters(queryParameters))
 | 
			
		||||
	}
 | 
			
		||||
	request, err := c.PrepareGetRequest(ctx, decorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "get.prepare", resourceID, err)
 | 
			
		||||
		return nil, retry.NewError(false, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return c.Send(ctx, request)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetResourceWithDecorators get a resource with decorators by resource ID
 | 
			
		||||
func (c *Client) GetResourceWithDecorators(ctx context.Context, resourceID string, decorators []autorest.PrepareDecorator) (*http.Response, *retry.Error) {
 | 
			
		||||
	getDecorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
	}
 | 
			
		||||
	getDecorators = append(getDecorators, decorators...)
 | 
			
		||||
	request, err := c.PrepareGetRequest(ctx, getDecorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "get.prepare", resourceID, err)
 | 
			
		||||
		return nil, retry.NewError(false, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return c.Send(ctx, request)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PutResource puts a resource by resource ID
 | 
			
		||||
func (c *Client) PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error) {
 | 
			
		||||
	putDecorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
		autorest.WithJSON(parameters),
 | 
			
		||||
	}
 | 
			
		||||
	return c.PutResourceWithDecorators(ctx, resourceID, parameters, putDecorators)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PutResources puts a list of resources from resources map[resourceID]parameters.
 | 
			
		||||
// Those resources sync requests are sequential while async requests are concurrent. It's especially
 | 
			
		||||
// useful when the ARM API doesn't support concurrent requests.
 | 
			
		||||
func (c *Client) PutResources(ctx context.Context, resources map[string]interface{}) map[string]*PutResourcesResponse {
 | 
			
		||||
	if len(resources) == 0 {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Sequential sync requests.
 | 
			
		||||
	futures := make(map[string]*azure.Future)
 | 
			
		||||
	responses := make(map[string]*PutResourcesResponse)
 | 
			
		||||
	for resourceID, parameters := range resources {
 | 
			
		||||
		decorators := []autorest.PrepareDecorator{
 | 
			
		||||
			autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
			autorest.WithJSON(parameters),
 | 
			
		||||
		}
 | 
			
		||||
		request, err := c.PreparePutRequest(ctx, decorators...)
 | 
			
		||||
		if err != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.prepare", resourceID, err)
 | 
			
		||||
			responses[resourceID] = &PutResourcesResponse{
 | 
			
		||||
				Error: retry.NewError(false, err),
 | 
			
		||||
			}
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		future, resp, clientErr := c.SendAsync(ctx, request)
 | 
			
		||||
		defer c.CloseResponse(ctx, resp)
 | 
			
		||||
		if clientErr != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.send", resourceID, clientErr.Error())
 | 
			
		||||
			responses[resourceID] = &PutResourcesResponse{
 | 
			
		||||
				Error: clientErr,
 | 
			
		||||
			}
 | 
			
		||||
			continue
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		futures[resourceID] = future
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Concurrent async requests.
 | 
			
		||||
	wg := sync.WaitGroup{}
 | 
			
		||||
	var responseLock sync.Mutex
 | 
			
		||||
	for resourceID, future := range futures {
 | 
			
		||||
		wg.Add(1)
 | 
			
		||||
		go func(resourceID string, future *azure.Future) {
 | 
			
		||||
			defer wg.Done()
 | 
			
		||||
			response, err := c.WaitForAsyncOperationResult(ctx, future, "armclient.PutResource")
 | 
			
		||||
			if err != nil {
 | 
			
		||||
				if response != nil {
 | 
			
		||||
					klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', response code %d", err.Error(), response.StatusCode)
 | 
			
		||||
				} else {
 | 
			
		||||
					klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', no response", err.Error())
 | 
			
		||||
				}
 | 
			
		||||
 | 
			
		||||
				retriableErr := retry.GetError(response, err)
 | 
			
		||||
				if !retriableErr.Retriable &&
 | 
			
		||||
					strings.Contains(strings.ToUpper(err.Error()), strings.ToUpper("InternalServerError")) {
 | 
			
		||||
					klog.V(5).Infof("Received InternalServerError in WaitForAsyncOperationResult: '%s', setting error retriable", err.Error())
 | 
			
		||||
					retriableErr.Retriable = true
 | 
			
		||||
				}
 | 
			
		||||
 | 
			
		||||
				responseLock.Lock()
 | 
			
		||||
				responses[resourceID] = &PutResourcesResponse{
 | 
			
		||||
					Error: retriableErr,
 | 
			
		||||
				}
 | 
			
		||||
				responseLock.Unlock()
 | 
			
		||||
				return
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			responseLock.Lock()
 | 
			
		||||
			responses[resourceID] = &PutResourcesResponse{
 | 
			
		||||
				Response: response,
 | 
			
		||||
			}
 | 
			
		||||
			responseLock.Unlock()
 | 
			
		||||
		}(resourceID, future)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	wg.Wait()
 | 
			
		||||
	return responses
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PutResourceWithDecorators puts a resource by resource ID
 | 
			
		||||
func (c *Client) PutResourceWithDecorators(ctx context.Context, resourceID string, parameters interface{}, decorators []autorest.PrepareDecorator) (*http.Response, *retry.Error) {
 | 
			
		||||
	request, err := c.PreparePutRequest(ctx, decorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.prepare", resourceID, err)
 | 
			
		||||
		return nil, retry.NewError(false, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	future, resp, clientErr := c.SendAsync(ctx, request)
 | 
			
		||||
	defer c.CloseResponse(ctx, resp)
 | 
			
		||||
	if clientErr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.send", resourceID, clientErr.Error())
 | 
			
		||||
		return nil, clientErr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	response, err := c.WaitForAsyncOperationResult(ctx, future, "armclient.PutResource")
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		if response != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', response code %d", err.Error(), response.StatusCode)
 | 
			
		||||
		} else {
 | 
			
		||||
			klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', no response", err.Error())
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		retriableErr := retry.GetError(response, err)
 | 
			
		||||
		if !retriableErr.Retriable &&
 | 
			
		||||
			strings.Contains(strings.ToUpper(err.Error()), strings.ToUpper("InternalServerError")) {
 | 
			
		||||
			klog.V(5).Infof("Received InternalServerError in WaitForAsyncOperationResult: '%s', setting error retriable", err.Error())
 | 
			
		||||
			retriableErr.Retriable = true
 | 
			
		||||
		}
 | 
			
		||||
		return nil, retriableErr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return response, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PatchResource patches a resource by resource ID
 | 
			
		||||
func (c *Client) PatchResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error) {
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
		autorest.WithJSON(parameters),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	request, err := c.PreparePatchRequest(ctx, decorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "patch.prepare", resourceID, err)
 | 
			
		||||
		return nil, retry.NewError(false, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	future, resp, clientErr := c.SendAsync(ctx, request)
 | 
			
		||||
	defer c.CloseResponse(ctx, resp)
 | 
			
		||||
	if clientErr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "patch.send", resourceID, clientErr.Error())
 | 
			
		||||
		return nil, clientErr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	response, err := c.WaitForAsyncOperationResult(ctx, future, "armclient.PatchResource")
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		if response != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', response code %d", err.Error(), response.StatusCode)
 | 
			
		||||
		} else {
 | 
			
		||||
			klog.V(5).Infof("Received error in WaitForAsyncOperationResult: '%s', no response", err.Error())
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		retriableErr := retry.GetError(response, err)
 | 
			
		||||
		if !retriableErr.Retriable &&
 | 
			
		||||
			strings.Contains(strings.ToUpper(err.Error()), strings.ToUpper("InternalServerError")) {
 | 
			
		||||
			klog.V(5).Infof("Received InternalServerError in WaitForAsyncOperationResult: '%s', setting error retriable", err.Error())
 | 
			
		||||
			retriableErr.Retriable = true
 | 
			
		||||
		}
 | 
			
		||||
		return nil, retriableErr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return response, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PutResourceAsync puts a resource by resource ID in async mode
 | 
			
		||||
func (c *Client) PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error) {
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
		autorest.WithJSON(parameters),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	request, err := c.PreparePutRequest(ctx, decorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.prepare", resourceID, err)
 | 
			
		||||
		return nil, retry.NewError(false, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	future, resp, rErr := c.SendAsync(ctx, request)
 | 
			
		||||
	defer c.CloseResponse(ctx, resp)
 | 
			
		||||
	if rErr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "put.send", resourceID, err)
 | 
			
		||||
		return nil, rErr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return future, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PostResource posts a resource by resource ID
 | 
			
		||||
func (c *Client) PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error) {
 | 
			
		||||
	pathParameters := map[string]interface{}{
 | 
			
		||||
		"resourceID": resourceID,
 | 
			
		||||
		"action":     action,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}/{action}", pathParameters),
 | 
			
		||||
		autorest.WithJSON(parameters),
 | 
			
		||||
	}
 | 
			
		||||
	request, err := c.PreparePostRequest(ctx, decorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "post.prepare", resourceID, err)
 | 
			
		||||
		return nil, retry.NewError(false, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return c.sendRequest(ctx, request)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeleteResource deletes a resource by resource ID
 | 
			
		||||
func (c *Client) DeleteResource(ctx context.Context, resourceID, ifMatch string) *retry.Error {
 | 
			
		||||
	future, clientErr := c.DeleteResourceAsync(ctx, resourceID, ifMatch)
 | 
			
		||||
	if clientErr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "delete.request", resourceID, clientErr.Error())
 | 
			
		||||
		return clientErr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if future == nil {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if err := c.WaitForAsyncOperationCompletion(ctx, future, "armclient.DeleteResource"); err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "delete.wait", resourceID, clientErr.Error())
 | 
			
		||||
		return retry.NewError(true, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// HeadResource heads a resource by resource ID
 | 
			
		||||
func (c *Client) HeadResource(ctx context.Context, resourceID string) (*http.Response, *retry.Error) {
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
	}
 | 
			
		||||
	request, err := c.PrepareHeadRequest(ctx, decorators...)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "head.prepare", resourceID, err)
 | 
			
		||||
		return nil, retry.NewError(false, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return c.sendRequest(ctx, request)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeleteResourceAsync delete a resource by resource ID and returns a future representing the async result
//
// A (nil, nil) return means the resource was already absent (HTTP 404), so
// there is no operation to poll; callers must handle a nil future as success.
func (c *Client) DeleteResourceAsync(ctx context.Context, resourceID, ifMatch string) (*azure.Future, *retry.Error) {
	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
	}
	// Optional optimistic-concurrency guard: delete only if the etag matches.
	if len(ifMatch) > 0 {
		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(ifMatch)))
	}

	deleteRequest, err := c.PrepareDeleteRequest(ctx, decorators...)
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.prepare", resourceID, err)
		return nil, retry.NewError(false, err)
	}

	resp, rerr := c.sendRequest(ctx, deleteRequest)
	defer c.CloseResponse(ctx, resp)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.send", resourceID, rerr.Error())
		return nil, rerr
	}

	// 200/202/204 are acceptable outcomes; 404 is tolerated so that deletes
	// are idempotent. Any other status becomes an error via retry.GetError.
	err = autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent, http.StatusNotFound))
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.respond", resourceID, err)
		return nil, retry.GetError(resp, err)
	}

	// Resource already gone: nothing to poll, report success with a nil future.
	if resp.StatusCode == http.StatusNotFound {
		return nil, nil
	}

	future, err := azure.NewFutureFromResponse(resp)
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deleteAsync.future", resourceID, err)
		return nil, retry.GetError(resp, err)
	}

	return &future, nil
}
 | 
			
		||||
 | 
			
		||||
// CloseResponse closes a response
 | 
			
		||||
func (c *Client) CloseResponse(ctx context.Context, response *http.Response) {
 | 
			
		||||
	if response != nil && response.Body != nil {
 | 
			
		||||
		if err := response.Body.Close(); err != nil {
 | 
			
		||||
			klog.Errorf("Error closing the response body: %v", err)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) prepareRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	decorators = append(
 | 
			
		||||
		decorators,
 | 
			
		||||
		withAPIVersion(c.apiVersion))
 | 
			
		||||
	preparer := autorest.CreatePreparer(decorators...)
 | 
			
		||||
	return preparer.Prepare((&http.Request{}).WithContext(ctx))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// withAPIVersion returns a PrepareDecorator that adds the given api-version
// to the request URL's query string, unless the caller already set one.
func withAPIVersion(apiVersion string) autorest.PrepareDecorator {
	const apiVersionKey = "api-version"
	return func(p autorest.Preparer) autorest.Preparer {
		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
			r, err := p.Prepare(r)
			if err == nil {
				if r.URL == nil {
					return r, fmt.Errorf("Error in withAPIVersion: Invoked with a nil URL")
				}

				// Respect an api-version that an earlier decorator already set.
				v := r.URL.Query()
				if len(v.Get(apiVersionKey)) > 0 {
					return r, nil
				}

				v.Add(apiVersionKey, apiVersion)
				// Re-encode so the new parameter lands in the URL (sorted by key).
				r.URL.RawQuery = v.Encode()
			}
			return r, err
		})
	}
}
 | 
			
		||||
 | 
			
		||||
// GetResourceID gets Azure resource ID
 | 
			
		||||
func GetResourceID(subscriptionID, resourceGroupName, resourceType, resourceName string) string {
 | 
			
		||||
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s",
 | 
			
		||||
		autorest.Encode("path", subscriptionID),
 | 
			
		||||
		autorest.Encode("path", resourceGroupName),
 | 
			
		||||
		resourceType,
 | 
			
		||||
		autorest.Encode("path", resourceName))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetChildResourceID gets Azure child resource ID
 | 
			
		||||
func GetChildResourceID(subscriptionID, resourceGroupName, resourceType, resourceName, childResourceType, childResourceName string) string {
 | 
			
		||||
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s/%s",
 | 
			
		||||
		autorest.Encode("path", subscriptionID),
 | 
			
		||||
		autorest.Encode("path", resourceGroupName),
 | 
			
		||||
		resourceType,
 | 
			
		||||
		autorest.Encode("path", resourceName),
 | 
			
		||||
		childResourceType,
 | 
			
		||||
		autorest.Encode("path", childResourceName))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetChildResourcesListID gets Azure child resources list ID
 | 
			
		||||
func GetChildResourcesListID(subscriptionID, resourceGroupName, resourceType, resourceName, childResourceType string) string {
 | 
			
		||||
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s",
 | 
			
		||||
		autorest.Encode("path", subscriptionID),
 | 
			
		||||
		autorest.Encode("path", resourceGroupName),
 | 
			
		||||
		resourceType,
 | 
			
		||||
		autorest.Encode("path", resourceName),
 | 
			
		||||
		childResourceType)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetProviderResourceID gets Azure RP resource ID
 | 
			
		||||
func GetProviderResourceID(subscriptionID, providerNamespace string) string {
 | 
			
		||||
	return fmt.Sprintf("/subscriptions/%s/providers/%s",
 | 
			
		||||
		autorest.Encode("path", subscriptionID),
 | 
			
		||||
		providerNamespace)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// GetProviderResourcesListID gets Azure RP resources list ID
 | 
			
		||||
func GetProviderResourcesListID(subscriptionID string) string {
 | 
			
		||||
	return fmt.Sprintf("/subscriptions/%s/providers", autorest.Encode("path", subscriptionID))
 | 
			
		||||
}
 | 
			
		||||
@@ -1,573 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package armclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"net/http/httptest"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	testResourceID = "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestNew(t *testing.T) {
 | 
			
		||||
	backoff := &retry.Backoff{Steps: 3}
 | 
			
		||||
	armClient := New(nil, "", "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	assert.NotNil(t, armClient.backoff)
 | 
			
		||||
	assert.Equal(t, 3, armClient.backoff.Steps, "Backoff steps should be same as the value passed in")
 | 
			
		||||
 | 
			
		||||
	backoff = &retry.Backoff{Steps: 0}
 | 
			
		||||
	armClient = New(nil, "", "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	assert.NotNil(t, armClient.backoff)
 | 
			
		||||
	assert.Equal(t, 1, armClient.backoff.Steps, "Backoff steps should be default to 1 if it is 0")
 | 
			
		||||
 | 
			
		||||
	armClient = New(nil, "", "test", "2019-01-01", "eastus", nil)
 | 
			
		||||
	assert.NotNil(t, armClient.backoff)
 | 
			
		||||
	assert.Equal(t, 1, armClient.backoff.Steps, "Backoff steps should be default to 1 if it is not set")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestSend verifies that Send retries failed requests: the test server fails
// the first two attempts with 500, so with a 3-step backoff the third attempt
// succeeds and exactly 2 failures are counted.
func TestSend(t *testing.T) {
	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Fail the first two requests; later requests fall through to 200 OK.
		if count <= 1 {
			http.Error(w, "failed", http.StatusInternalServerError)
			count++
		}
	}))

	backoff := &retry.Backoff{Steps: 3}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", "testgroup"),
		"subscriptionId":    autorest.Encode("path", "testid"),
		"resourceName":      autorest.Encode("path", "testname"),
	}

	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters(
			"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters),
	}

	ctx := context.Background()
	request, err := armClient.PrepareGetRequest(ctx, decorators...)
	assert.NoError(t, err)

	response, rerr := armClient.Send(ctx, request)
	assert.Nil(t, rerr)
	assert.Equal(t, 2, count)
	assert.Equal(t, http.StatusOK, response.StatusCode)
}
 | 
			
		||||
 | 
			
		||||
// TestSendFailure verifies that Send exhausts the full backoff (3 attempts)
// when the server keeps returning 500, and surfaces both the error and the
// final failing response.
func TestSendFailure(t *testing.T) {
	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Always fail so every retry attempt is consumed.
		http.Error(w, "failed", http.StatusInternalServerError)
		count++
	}))

	backoff := &retry.Backoff{Steps: 3}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", "testgroup"),
		"subscriptionId":    autorest.Encode("path", "testid"),
		"resourceName":      autorest.Encode("path", "testname"),
	}

	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters(
			"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters),
	}

	ctx := context.Background()
	request, err := armClient.PreparePatchRequest(ctx, decorators...)
	assert.NoError(t, err)

	// All 3 attempts fail: error is non-nil and the last response is a 500.
	response, rerr := armClient.Send(ctx, request)
	assert.NotNil(t, rerr)
	assert.Equal(t, 3, count)
	assert.Equal(t, http.StatusInternalServerError, response.StatusCode)
}
 | 
			
		||||
 | 
			
		||||
// TestSendThrottled verifies that a 429 response carrying a Retry-After
// header stops the retry loop immediately: only one request is sent even
// though the backoff allows 3 steps.
func TestSendThrottled(t *testing.T) {
	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Throttle every request with a 30-second Retry-After hint.
		w.Header().Set(retry.RetryAfterHeaderKey, "30")
		http.Error(w, "failed", http.StatusTooManyRequests)
		count++
	}))

	backoff := &retry.Backoff{Steps: 3}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", "testgroup"),
		"subscriptionId":    autorest.Encode("path", "testid"),
		"resourceName":      autorest.Encode("path", "testname"),
	}
	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters(
			"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters),
	}

	ctx := context.Background()
	request, err := armClient.PrepareGetRequest(ctx, decorators...)
	assert.NoError(t, err)

	// Throttling short-circuits retries: exactly one request reaches the server.
	response, rerr := armClient.Send(ctx, request)
	assert.NotNil(t, rerr)
	assert.Equal(t, 1, count)
	assert.Equal(t, http.StatusTooManyRequests, response.StatusCode)
}
 | 
			
		||||
 | 
			
		||||
// TestSendAsync verifies that SendAsync surfaces a 403 as an error with no
// future and no response, and that the error is classified as non-retriable.
func TestSendAsync(t *testing.T) {
	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		count++
		// 403 Forbidden is a terminal client error — must not be retried.
		http.Error(w, "failed", http.StatusForbidden)
	}))

	backoff := &retry.Backoff{Steps: 1}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	armClient.client.RetryDuration = time.Millisecond * 1

	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", "testgroup"),
		"subscriptionId":    autorest.Encode("path", "testid"),
		"resourceName":      autorest.Encode("path", "testname"),
	}
	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters(
			"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters),
	}

	ctx := context.Background()
	request, err := armClient.PreparePutRequest(ctx, decorators...)
	assert.NoError(t, err)

	future, response, rerr := armClient.SendAsync(ctx, request)
	assert.Nil(t, future)
	assert.Nil(t, response)
	assert.Equal(t, 1, count)
	assert.NotNil(t, rerr)
	assert.Equal(t, false, rerr.Retriable)
}
 | 
			
		||||
 | 
			
		||||
// TestSendAsyncSuccess verifies that SendAsync returns both a future and the
// HTTP response, with no error, when the server answers 200 OK.
func TestSendAsyncSuccess(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))

	backoff := &retry.Backoff{Steps: 1}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	armClient.client.RetryDuration = time.Millisecond * 1

	pathParameters := map[string]interface{}{
		"resourceGroupName": autorest.Encode("path", "testgroup"),
		"subscriptionId":    autorest.Encode("path", "testid"),
		"resourceName":      autorest.Encode("path", "testname"),
	}
	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters(
			"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vNets/{resourceName}", pathParameters),
	}

	ctx := context.Background()
	request, err := armClient.PreparePostRequest(ctx, decorators...)
	assert.NoError(t, err)

	future, response, rerr := armClient.SendAsync(ctx, request)
	assert.Nil(t, rerr)
	assert.NotNil(t, response)
	assert.NotNil(t, future)
}
 | 
			
		||||
 | 
			
		||||
func TestNormalizeAzureRegion(t *testing.T) {
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		region   string
 | 
			
		||||
		expected string
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			region:   "eastus",
 | 
			
		||||
			expected: "eastus",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			region:   " eastus ",
 | 
			
		||||
			expected: "eastus",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			region:   " eastus\t",
 | 
			
		||||
			expected: "eastus",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			region:   " eastus\v",
 | 
			
		||||
			expected: "eastus",
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			region:   " eastus\v\r\f\n",
 | 
			
		||||
			expected: "eastus",
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for i, test := range tests {
 | 
			
		||||
		real := NormalizeAzureRegion(test.region)
 | 
			
		||||
		assert.Equal(t, test.expected, real, "test[%d]: NormalizeAzureRegion(%q) != %q", i, test.region, test.expected)
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestGetResource verifies the URI composed by GetResource — including the
// percent-encoded $expand query parameter and the api-version — and that the
// response body is passed through to the caller.
func TestGetResource(t *testing.T) {
	expectedURI := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP?%24expand=data&api-version=2019-01-01"

	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, "GET", r.Method)
		assert.Equal(t, expectedURI, r.URL.String())
		w.WriteHeader(http.StatusOK)
		w.Write([]byte("{data: testPIP}"))
		count++
	}))

	backoff := &retry.Backoff{Steps: 1}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	armClient.client.RetryDuration = time.Millisecond * 1

	ctx := context.Background()
	response, rerr := armClient.GetResource(ctx, testResourceID, "data")
	// NOTE(review): ioutil.ReadAll is deprecated in favor of io.ReadAll;
	// kept as-is because this file imports io/ioutil.
	byteResponseBody, _ := ioutil.ReadAll(response.Body)
	stringResponseBody := string(byteResponseBody)
	assert.Nil(t, rerr)
	assert.Equal(t, "{data: testPIP}", stringResponseBody)
	assert.Equal(t, 1, count)
}
 | 
			
		||||
 | 
			
		||||
func TestGetResourceWithDecorators(t *testing.T) {
 | 
			
		||||
	expectedURI := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP?api-version=2019-01-01¶m1=value1¶m2=value2"
 | 
			
		||||
 | 
			
		||||
	count := 0
 | 
			
		||||
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
		assert.Equal(t, "GET", r.Method)
 | 
			
		||||
		assert.Equal(t, expectedURI, r.URL.String())
 | 
			
		||||
		w.WriteHeader(http.StatusOK)
 | 
			
		||||
		w.Write([]byte("{data: testPIP}"))
 | 
			
		||||
		count++
 | 
			
		||||
	}))
 | 
			
		||||
 | 
			
		||||
	backoff := &retry.Backoff{Steps: 1}
 | 
			
		||||
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	armClient.client.RetryDuration = time.Millisecond * 1
 | 
			
		||||
 | 
			
		||||
	params := map[string]interface{}{
 | 
			
		||||
		"param1": "value1",
 | 
			
		||||
		"param2": "value2",
 | 
			
		||||
	}
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithQueryParameters(params),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	response, rerr := armClient.GetResourceWithDecorators(ctx, testResourceID, decorators)
 | 
			
		||||
	byteResponseBody, _ := ioutil.ReadAll(response.Body)
 | 
			
		||||
	stringResponseBody := string(byteResponseBody)
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
	assert.Equal(t, "{data: testPIP}", stringResponseBody)
 | 
			
		||||
	assert.Equal(t, 1, count)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestPutResource verifies that PutResource follows the Azure-AsyncOperation
// header returned by the PUT, polls the operation endpoint, and surfaces a
// retriable error when the async operation reports status "Failed".
func TestPutResource(t *testing.T) {
	expectedURI := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP?api-version=2019-01-01"
	operationURI := "/subscriptions/subscription/providers/Microsoft.Network/locations/eastus/operations/op?api-version=2019-01-01"
	// handlers[0] answers the initial PUT with 201 plus an Azure-AsyncOperation
	// header pointing at operationURI; handlers[1] answers the subsequent poll
	// with a failed operation payload.
	handlers := []func(http.ResponseWriter, *http.Request){
		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "PUT", req.Method)
			assert.Equal(t, expectedURI, req.URL.String())
			rw.Header().Set(http.CanonicalHeaderKey("Azure-AsyncOperation"),
				fmt.Sprintf("http://%s%s", req.Host, operationURI))
			rw.WriteHeader(http.StatusCreated)
		},

		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "GET", req.Method)
			assert.Equal(t, operationURI, req.URL.String())

			rw.WriteHeader(http.StatusOK)
			rw.Write([]byte(`{"error":{"code":"InternalServerError"},"status":"Failed"}`))
		},
	}

	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		handlers[count](w, r)
		count++
		// Clamp so repeated polls keep hitting the "Failed" handler.
		if count > 1 {
			count = 1
		}
	}))

	backoff := &retry.Backoff{Steps: 1}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	armClient.client.RetryDuration = time.Millisecond * 1

	ctx := context.Background()
	response, rerr := armClient.PutResource(ctx, testResourceID, nil)
	assert.Equal(t, 1, count)
	assert.Nil(t, response)
	assert.NotNil(t, rerr)
	assert.Equal(t, true, rerr.Retriable)
}
 | 
			
		||||
 | 
			
		||||
// TestPutResources verifies batched PUTs: PutResources returns nil for a nil
// resources map, and otherwise issues one PUT per entry followed by async
// polling, yielding a non-nil response map. The handler sequence below covers
// two PUTs (one 201-with-async-header, one 500) and their follow-up polls.
func TestPutResources(t *testing.T) {
	serverFuncs := []func(rw http.ResponseWriter, req *http.Request){
		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "PUT", req.Method)

			rw.Header().Set(http.CanonicalHeaderKey("Azure-AsyncOperation"),
				fmt.Sprintf("http://%s%s", req.Host, "/id/1?api-version=2019-01-01"))
			rw.WriteHeader(http.StatusCreated)
		},
		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "PUT", req.Method)

			rw.Header().Set(http.CanonicalHeaderKey("Azure-AsyncOperation"),
				fmt.Sprintf("http://%s%s", req.Host, "/id/2?api-version=2019-01-01"))
			rw.WriteHeader(http.StatusInternalServerError)
		},
		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "GET", req.Method)

			rw.WriteHeader(http.StatusOK)
			rw.Write([]byte(`{"error":{"code":"InternalServerError"},"status":"Failed"}`))
		},
		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "GET", req.Method)

			rw.WriteHeader(http.StatusOK)
			rw.Write([]byte(`{"error":{"code":"InternalServerError"},"status":"Failed"}`))
		},
	}

	// i indexes the next handler; total counts every request the server saw.
	i, total := 0, 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		serverFuncs[i](w, r)
		i++
		// Clamp so extra polls reuse the last "Failed" handler.
		if i > 3 {
			i = 3
		}
		total++
	}))

	backoff := &retry.Backoff{Steps: 1}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	armClient.client.RetryDuration = time.Millisecond * 1

	ctx := context.Background()
	resources := map[string]interface{}{
		"/id/1": nil,
		"/id/2": nil,
	}
	// Nil input short-circuits to a nil response map.
	responses := armClient.PutResources(ctx, nil)
	assert.Nil(t, responses)
	responses = armClient.PutResources(ctx, resources)
	assert.NotNil(t, responses)
	// Two PUTs plus one poll (the 500 PUT is not polled): three requests total.
	assert.Equal(t, 3, total)
}
 | 
			
		||||
 | 
			
		||||
func TestPutResourceAsync(t *testing.T) {
 | 
			
		||||
	count := 0
 | 
			
		||||
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
		count++
 | 
			
		||||
		http.Error(w, "failed", http.StatusInternalServerError)
 | 
			
		||||
	}))
 | 
			
		||||
 | 
			
		||||
	backoff := &retry.Backoff{Steps: 3}
 | 
			
		||||
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	armClient.client.RetryDuration = time.Millisecond * 1
 | 
			
		||||
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	resourceID := testResourceID
 | 
			
		||||
	future, rerr := armClient.PutResourceAsync(ctx, resourceID, "")
 | 
			
		||||
	assert.Equal(t, 3, count)
 | 
			
		||||
	assert.Nil(t, future)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, true, rerr.Retriable)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteResourceAsync(t *testing.T) {
 | 
			
		||||
	count := 0
 | 
			
		||||
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
		count++
 | 
			
		||||
		http.Error(w, "failed", http.StatusInternalServerError)
 | 
			
		||||
	}))
 | 
			
		||||
 | 
			
		||||
	backoff := &retry.Backoff{Steps: 3}
 | 
			
		||||
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	armClient.client.RetryDuration = time.Millisecond * 1
 | 
			
		||||
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	resourceID := testResourceID
 | 
			
		||||
	future, rerr := armClient.DeleteResourceAsync(ctx, resourceID, "")
 | 
			
		||||
	assert.Equal(t, 3, count)
 | 
			
		||||
	assert.Nil(t, future)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, true, rerr.Retriable)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestPatchResource verifies that PatchResource follows the Azure-AsyncOperation
// header returned by the PATCH, polls the operation endpoint, and surfaces a
// retriable error when the async operation reports status "Failed".
func TestPatchResource(t *testing.T) {
	expectedURI := "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Network/publicIPAddresses/testPIP?api-version=2019-01-01"
	operationURI := "/subscriptions/subscription/providers/Microsoft.Network/locations/eastus/operations/op?api-version=2019-01-01"
	// handlers[0] answers the initial PATCH with 201 plus an Azure-AsyncOperation
	// header; handlers[1] answers the subsequent poll with a failed operation.
	handlers := []func(http.ResponseWriter, *http.Request){
		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "PATCH", req.Method)
			assert.Equal(t, expectedURI, req.URL.String())
			rw.Header().Set(http.CanonicalHeaderKey("Azure-AsyncOperation"),
				fmt.Sprintf("http://%s%s", req.Host, operationURI))
			rw.WriteHeader(http.StatusCreated)
		},

		func(rw http.ResponseWriter, req *http.Request) {
			assert.Equal(t, "GET", req.Method)
			assert.Equal(t, operationURI, req.URL.String())

			rw.WriteHeader(http.StatusOK)
			rw.Write([]byte(`{"error":{"code":"InternalServerError"},"status":"Failed"}`))
		},
	}

	count := 0
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		handlers[count](w, r)
		count++
		// Clamp so repeated polls keep hitting the "Failed" handler.
		if count > 1 {
			count = 1
		}
	}))

	backoff := &retry.Backoff{Steps: 1}
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
	armClient.client.RetryDuration = time.Millisecond * 1

	ctx := context.Background()
	response, rerr := armClient.PatchResource(ctx, testResourceID, nil)
	assert.Equal(t, 1, count)
	assert.Nil(t, response)
	assert.NotNil(t, rerr)
	assert.Equal(t, true, rerr.Retriable)
}
 | 
			
		||||
 | 
			
		||||
func TestPostResource(t *testing.T) {
 | 
			
		||||
	count := 0
 | 
			
		||||
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
		count++
 | 
			
		||||
		http.Error(w, "failed", http.StatusInternalServerError)
 | 
			
		||||
	}))
 | 
			
		||||
 | 
			
		||||
	backoff := &retry.Backoff{Steps: 3}
 | 
			
		||||
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	armClient.client.RetryDuration = time.Millisecond * 1
 | 
			
		||||
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	resourceID := testResourceID
 | 
			
		||||
	future, rerr := armClient.PostResource(ctx, resourceID, "post", "")
 | 
			
		||||
	assert.Equal(t, 3, count)
 | 
			
		||||
	assert.NotNil(t, future)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, true, rerr.Retriable)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteResource(t *testing.T) {
 | 
			
		||||
	count := 0
 | 
			
		||||
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
		count++
 | 
			
		||||
		http.Error(w, "failed", http.StatusInternalServerError)
 | 
			
		||||
	}))
 | 
			
		||||
 | 
			
		||||
	backoff := &retry.Backoff{Steps: 3}
 | 
			
		||||
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	armClient.client.RetryDuration = time.Millisecond * 1
 | 
			
		||||
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	resourceID := testResourceID
 | 
			
		||||
	rerr := armClient.DeleteResource(ctx, resourceID, "")
 | 
			
		||||
	assert.Equal(t, 3, count)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, true, rerr.Retriable)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestHeadResource(t *testing.T) {
 | 
			
		||||
	count := 0
 | 
			
		||||
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 | 
			
		||||
		count++
 | 
			
		||||
		http.Error(w, "failed", http.StatusInternalServerError)
 | 
			
		||||
	}))
 | 
			
		||||
 | 
			
		||||
	backoff := &retry.Backoff{Steps: 3}
 | 
			
		||||
	armClient := New(nil, server.URL, "test", "2019-01-01", "eastus", backoff)
 | 
			
		||||
	armClient.client.RetryDuration = time.Millisecond * 1
 | 
			
		||||
 | 
			
		||||
	ctx := context.Background()
 | 
			
		||||
	resourceID := testResourceID
 | 
			
		||||
	response, rerr := armClient.HeadResource(ctx, resourceID)
 | 
			
		||||
	assert.Equal(t, 3, count)
 | 
			
		||||
	assert.NotNil(t, response)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, true, rerr.Retriable)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetResourceID(t *testing.T) {
 | 
			
		||||
	expectedResourceID := "/subscriptions/sub/resourceGroups/rg/providers/type/name"
 | 
			
		||||
 | 
			
		||||
	resourceID := GetResourceID("sub", "rg", "type", "name")
 | 
			
		||||
	assert.Equal(t, expectedResourceID, resourceID)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetChildResourceID(t *testing.T) {
 | 
			
		||||
	expectedResourceID := "/subscriptions/sub/resourceGroups/rg/providers/type/name-1/name-2/name-3"
 | 
			
		||||
 | 
			
		||||
	resourceID := GetChildResourceID("sub", "rg", "type", "name-1", "name-2", "name-3")
 | 
			
		||||
	assert.Equal(t, expectedResourceID, resourceID)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetChildResourcesListID(t *testing.T) {
 | 
			
		||||
	expectedResourceID := "/subscriptions/sub/resourceGroups/rg/providers/type/name-1/name-2"
 | 
			
		||||
 | 
			
		||||
	resourceID := GetChildResourcesListID("sub", "rg", "type", "name-1", "name-2")
 | 
			
		||||
	assert.Equal(t, expectedResourceID, resourceID)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetProviderResourceID(t *testing.T) {
 | 
			
		||||
	expectedResourceID := "/subscriptions/sub/providers/namespace"
 | 
			
		||||
 | 
			
		||||
	resourceID := GetProviderResourceID("sub", "namespace")
 | 
			
		||||
	assert.Equal(t, expectedResourceID, resourceID)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetProviderResourcesListID(t *testing.T) {
 | 
			
		||||
	expectedResourceID := "/subscriptions/sub/providers"
 | 
			
		||||
 | 
			
		||||
	resourceID := GetProviderResourcesListID("sub")
 | 
			
		||||
	assert.Equal(t, expectedResourceID, resourceID)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package armclient implements the client for ARM.
 | 
			
		||||
package armclient // import "k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
@@ -1,104 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
//go:generate mockgen -copyright_file=$BUILD_TAG_FILE -source=interface.go  -destination=mockarmclient/interface.go -package=mockarmclient Interface
 | 
			
		||||
package armclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"net/http"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// PutResourcesResponse defines the response for PutResources.
type PutResourcesResponse struct {
	// Response is the raw HTTP response for the PUT request.
	Response *http.Response
	// Error is the typed retry error for the PUT request, if any.
	Error    *retry.Error
}
 | 
			
		||||
 | 
			
		||||
// Interface is the client interface for ARM.
type Interface interface {
	// Send sends a http request to ARM service with possible retry to regional ARM endpoint.
	Send(ctx context.Context, request *http.Request) (*http.Response, *retry.Error)

	// PreparePutRequest prepares put request
	PreparePutRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error)

	// PreparePostRequest prepares post request
	PreparePostRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error)

	// PrepareGetRequest prepares get request
	PrepareGetRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error)

	// PrepareDeleteRequest prepares delete request
	PrepareDeleteRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error)

	// PrepareHeadRequest prepares head request
	PrepareHeadRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error)

	// WaitForAsyncOperationCompletion waits for an operation completion
	WaitForAsyncOperationCompletion(ctx context.Context, future *azure.Future, asyncOperationName string) error

	// WaitForAsyncOperationResult waits for an operation result.
	WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, asyncOperationName string) (*http.Response, error)

	// SendAsync send a request and return a future object representing the async result as well as the origin http response
	SendAsync(ctx context.Context, request *http.Request) (*azure.Future, *http.Response, *retry.Error)

	// PutResource puts a resource by resource ID
	PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error)

	// PutResources puts a list of resources from resources map[resourceID]parameters.
	// Those resources sync requests are sequential while async requests are concurrent. It 's especially
	// useful when the ARM API doesn't support concurrent requests.
	PutResources(ctx context.Context, resources map[string]interface{}) map[string]*PutResourcesResponse

	// PutResourceWithDecorators puts a resource with decorators by resource ID
	PutResourceWithDecorators(ctx context.Context, resourceID string, parameters interface{}, decorators []autorest.PrepareDecorator) (*http.Response, *retry.Error)

	// PatchResource patches a resource by resource ID
	PatchResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error)

	// PutResourceAsync puts a resource by resource ID in async mode
	PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error)

	// HeadResource heads a resource by resource ID
	HeadResource(ctx context.Context, resourceID string) (*http.Response, *retry.Error)

	// GetResource gets a resource by resource ID
	GetResource(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error)

	// GetResourceWithDecorators gets a resource with decorators by resource ID
	GetResourceWithDecorators(ctx context.Context, resourceID string, decorators []autorest.PrepareDecorator) (*http.Response, *retry.Error)

	// PostResource posts a resource by resource ID
	PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error)

	// DeleteResource deletes a resource by resource ID
	DeleteResource(ctx context.Context, resourceID, ifMatch string) *retry.Error

	// DeleteResourceAsync deletes a resource by resource ID and returns a future representing the async result
	DeleteResourceAsync(ctx context.Context, resourceID, ifMatch string) (*azure.Future, *retry.Error)

	// CloseResponse closes a response
	CloseResponse(ctx context.Context, response *http.Response)
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package mockarmclient implements the mock client for ARM.
 | 
			
		||||
package mockarmclient // import "k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient"
 | 
			
		||||
@@ -1,394 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Code generated by MockGen. DO NOT EDIT.
 | 
			
		||||
// Source: interface.go
 | 
			
		||||
 | 
			
		||||
// Package mockarmclient is a generated GoMock package.
 | 
			
		||||
package mockarmclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	context "context"
 | 
			
		||||
	http "net/http"
 | 
			
		||||
	reflect "reflect"
 | 
			
		||||
 | 
			
		||||
	autorest "github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	azure "github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
	gomock "github.com/golang/mock/gomock"
 | 
			
		||||
	armclient "k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
	retry "k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// MockInterface is a mock of Interface interface.
type MockInterface struct {
	ctrl     *gomock.Controller
	recorder *MockInterfaceMockRecorder
}

// MockInterfaceMockRecorder is the mock recorder for MockInterface.
type MockInterfaceMockRecorder struct {
	mock *MockInterface
}
 | 
			
		||||
 | 
			
		||||
// NewMockInterface creates a new mock instance.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
	mock := &MockInterface{ctrl: ctrl}
	mock.recorder = &MockInterfaceMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
	return m.recorder
}
 | 
			
		||||
 | 
			
		||||
// CloseResponse mocks base method.
func (m *MockInterface) CloseResponse(ctx context.Context, response *http.Response) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "CloseResponse", ctx, response)
}

// CloseResponse indicates an expected call of CloseResponse.
func (mr *MockInterfaceMockRecorder) CloseResponse(ctx, response interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseResponse", reflect.TypeOf((*MockInterface)(nil).CloseResponse), ctx, response)
}
 | 
			
		||||
 | 
			
		||||
// DeleteResource mocks base method.
func (m *MockInterface) DeleteResource(ctx context.Context, resourceID, ifMatch string) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteResource", ctx, resourceID, ifMatch)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}

// DeleteResource indicates an expected call of DeleteResource.
func (mr *MockInterfaceMockRecorder) DeleteResource(ctx, resourceID, ifMatch interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResource", reflect.TypeOf((*MockInterface)(nil).DeleteResource), ctx, resourceID, ifMatch)
}
 | 
			
		||||
 | 
			
		||||
// DeleteResourceAsync mocks base method.
func (m *MockInterface) DeleteResourceAsync(ctx context.Context, resourceID, ifMatch string) (*azure.Future, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "DeleteResourceAsync", ctx, resourceID, ifMatch)
	ret0, _ := ret[0].(*azure.Future)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// DeleteResourceAsync indicates an expected call of DeleteResourceAsync.
func (mr *MockInterfaceMockRecorder) DeleteResourceAsync(ctx, resourceID, ifMatch interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteResourceAsync", reflect.TypeOf((*MockInterface)(nil).DeleteResourceAsync), ctx, resourceID, ifMatch)
}
 | 
			
		||||
 | 
			
		||||
// GetResource mocks base method.
func (m *MockInterface) GetResource(ctx context.Context, resourceID, expand string) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetResource", ctx, resourceID, expand)
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// GetResource indicates an expected call of GetResource.
func (mr *MockInterfaceMockRecorder) GetResource(ctx, resourceID, expand interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResource", reflect.TypeOf((*MockInterface)(nil).GetResource), ctx, resourceID, expand)
}
 | 
			
		||||
 | 
			
		||||
// GetResourceWithDecorators mocks base method.
func (m *MockInterface) GetResourceWithDecorators(ctx context.Context, resourceID string, decorators []autorest.PrepareDecorator) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetResourceWithDecorators", ctx, resourceID, decorators)
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// GetResourceWithDecorators indicates an expected call of GetResourceWithDecorators.
func (mr *MockInterfaceMockRecorder) GetResourceWithDecorators(ctx, resourceID, decorators interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetResourceWithDecorators", reflect.TypeOf((*MockInterface)(nil).GetResourceWithDecorators), ctx, resourceID, decorators)
}
 | 
			
		||||
 | 
			
		||||
// HeadResource mocks base method.
func (m *MockInterface) HeadResource(ctx context.Context, resourceID string) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "HeadResource", ctx, resourceID)
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// HeadResource indicates an expected call of HeadResource.
func (mr *MockInterfaceMockRecorder) HeadResource(ctx, resourceID interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadResource", reflect.TypeOf((*MockInterface)(nil).HeadResource), ctx, resourceID)
}
 | 
			
		||||
 | 
			
		||||
// PatchResource mocks base method.
func (m *MockInterface) PatchResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PatchResource", ctx, resourceID, parameters)
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// PatchResource indicates an expected call of PatchResource.
func (mr *MockInterfaceMockRecorder) PatchResource(ctx, resourceID, parameters interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PatchResource", reflect.TypeOf((*MockInterface)(nil).PatchResource), ctx, resourceID, parameters)
}
 | 
			
		||||
 | 
			
		||||
// PostResource mocks base method.
func (m *MockInterface) PostResource(ctx context.Context, resourceID, action string, parameters interface{}) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PostResource", ctx, resourceID, action, parameters)
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}

// PostResource indicates an expected call of PostResource.
func (mr *MockInterfaceMockRecorder) PostResource(ctx, resourceID, action, parameters interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PostResource", reflect.TypeOf((*MockInterface)(nil).PostResource), ctx, resourceID, action, parameters)
}
 | 
			
		||||
 | 
			
		||||
// PrepareDeleteRequest mocks base method.
 | 
			
		||||
func (m *MockInterface) PrepareDeleteRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
 | 
			
		||||
	m.ctrl.T.Helper()
 | 
			
		||||
	varargs := []interface{}{ctx}
 | 
			
		||||
	for _, a := range decorators {
 | 
			
		||||
		varargs = append(varargs, a)
 | 
			
		||||
	}
 | 
			
		||||
	ret := m.ctrl.Call(m, "PrepareDeleteRequest", varargs...)
 | 
			
		||||
	ret0, _ := ret[0].(*http.Request)
 | 
			
		||||
	ret1, _ := ret[1].(error)
 | 
			
		||||
	return ret0, ret1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// PrepareDeleteRequest indicates an expected call of PrepareDeleteRequest.
func (mr *MockInterfaceMockRecorder) PrepareDeleteRequest(ctx interface{}, decorators ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators, mirroring the mock method.
	varargs := append([]interface{}{ctx}, decorators...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareDeleteRequest", reflect.TypeOf((*MockInterface)(nil).PrepareDeleteRequest), varargs...)
}
 | 
			
		||||
 | 
			
		||||
// PrepareGetRequest mocks base method.
func (m *MockInterface) PrepareGetRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
	m.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators into a single argument list.
	varargs := []interface{}{ctx}
	for _, a := range decorators {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "PrepareGetRequest", varargs...)
	ret0, _ := ret[0].(*http.Request)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// PrepareGetRequest indicates an expected call of PrepareGetRequest.
func (mr *MockInterfaceMockRecorder) PrepareGetRequest(ctx interface{}, decorators ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators, mirroring the mock method.
	varargs := append([]interface{}{ctx}, decorators...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareGetRequest", reflect.TypeOf((*MockInterface)(nil).PrepareGetRequest), varargs...)
}
 | 
			
		||||
 | 
			
		||||
// PrepareHeadRequest mocks base method.
func (m *MockInterface) PrepareHeadRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
	m.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators into a single argument list.
	varargs := []interface{}{ctx}
	for _, a := range decorators {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "PrepareHeadRequest", varargs...)
	ret0, _ := ret[0].(*http.Request)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// PrepareHeadRequest indicates an expected call of PrepareHeadRequest.
func (mr *MockInterfaceMockRecorder) PrepareHeadRequest(ctx interface{}, decorators ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators, mirroring the mock method.
	varargs := append([]interface{}{ctx}, decorators...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareHeadRequest", reflect.TypeOf((*MockInterface)(nil).PrepareHeadRequest), varargs...)
}
 | 
			
		||||
 | 
			
		||||
// PreparePostRequest mocks base method.
func (m *MockInterface) PreparePostRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
	m.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators into a single argument list.
	varargs := []interface{}{ctx}
	for _, a := range decorators {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "PreparePostRequest", varargs...)
	ret0, _ := ret[0].(*http.Request)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// PreparePostRequest indicates an expected call of PreparePostRequest.
func (mr *MockInterfaceMockRecorder) PreparePostRequest(ctx interface{}, decorators ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators, mirroring the mock method.
	varargs := append([]interface{}{ctx}, decorators...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreparePostRequest", reflect.TypeOf((*MockInterface)(nil).PreparePostRequest), varargs...)
}
 | 
			
		||||
 | 
			
		||||
// PreparePutRequest mocks base method.
func (m *MockInterface) PreparePutRequest(ctx context.Context, decorators ...autorest.PrepareDecorator) (*http.Request, error) {
	m.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators into a single argument list.
	varargs := []interface{}{ctx}
	for _, a := range decorators {
		varargs = append(varargs, a)
	}
	ret := m.ctrl.Call(m, "PreparePutRequest", varargs...)
	ret0, _ := ret[0].(*http.Request)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// PreparePutRequest indicates an expected call of PreparePutRequest.
func (mr *MockInterfaceMockRecorder) PreparePutRequest(ctx interface{}, decorators ...interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Flatten ctx plus the variadic decorators, mirroring the mock method.
	varargs := append([]interface{}{ctx}, decorators...)
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PreparePutRequest", reflect.TypeOf((*MockInterface)(nil).PreparePutRequest), varargs...)
}
 | 
			
		||||
 | 
			
		||||
// PutResource mocks base method.
func (m *MockInterface) PutResource(ctx context.Context, resourceID string, parameters interface{}) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PutResource", ctx, resourceID, parameters)
	// Soft type assertions: a mis-typed stub return yields the zero value.
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// PutResource indicates an expected call of PutResource.
func (mr *MockInterfaceMockRecorder) PutResource(ctx, resourceID, parameters interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResource", reflect.TypeOf((*MockInterface)(nil).PutResource), ctx, resourceID, parameters)
}
 | 
			
		||||
 | 
			
		||||
// PutResourceAsync mocks base method.
func (m *MockInterface) PutResourceAsync(ctx context.Context, resourceID string, parameters interface{}) (*azure.Future, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PutResourceAsync", ctx, resourceID, parameters)
	// Soft type assertions: a mis-typed stub return yields the zero value.
	ret0, _ := ret[0].(*azure.Future)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// PutResourceAsync indicates an expected call of PutResourceAsync.
func (mr *MockInterfaceMockRecorder) PutResourceAsync(ctx, resourceID, parameters interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourceAsync", reflect.TypeOf((*MockInterface)(nil).PutResourceAsync), ctx, resourceID, parameters)
}
 | 
			
		||||
 | 
			
		||||
// PutResourceWithDecorators mocks base method.
func (m *MockInterface) PutResourceWithDecorators(ctx context.Context, resourceID string, parameters interface{}, decorators []autorest.PrepareDecorator) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	// decorators is a plain slice here (not variadic), so no flattening is needed.
	ret := m.ctrl.Call(m, "PutResourceWithDecorators", ctx, resourceID, parameters, decorators)
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// PutResourceWithDecorators indicates an expected call of PutResourceWithDecorators.
func (mr *MockInterfaceMockRecorder) PutResourceWithDecorators(ctx, resourceID, parameters, decorators interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResourceWithDecorators", reflect.TypeOf((*MockInterface)(nil).PutResourceWithDecorators), ctx, resourceID, parameters, decorators)
}
 | 
			
		||||
 | 
			
		||||
// PutResources mocks base method.
func (m *MockInterface) PutResources(ctx context.Context, resources map[string]interface{}) map[string]*armclient.PutResourcesResponse {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "PutResources", ctx, resources)
	// Soft type assertion: a mis-typed stub return yields a nil map.
	ret0, _ := ret[0].(map[string]*armclient.PutResourcesResponse)
	return ret0
}
 | 
			
		||||
 | 
			
		||||
// PutResources indicates an expected call of PutResources.
func (mr *MockInterfaceMockRecorder) PutResources(ctx, resources interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutResources", reflect.TypeOf((*MockInterface)(nil).PutResources), ctx, resources)
}
 | 
			
		||||
 | 
			
		||||
// Send mocks base method.
func (m *MockInterface) Send(ctx context.Context, request *http.Request) (*http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Send", ctx, request)
	// Soft type assertions: a mis-typed stub return yields the zero value.
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// Send indicates an expected call of Send.
func (mr *MockInterfaceMockRecorder) Send(ctx, request interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockInterface)(nil).Send), ctx, request)
}
 | 
			
		||||
 | 
			
		||||
// SendAsync mocks base method.
func (m *MockInterface) SendAsync(ctx context.Context, request *http.Request) (*azure.Future, *http.Response, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "SendAsync", ctx, request)
	// Soft type assertions: a mis-typed stub return yields the zero value.
	ret0, _ := ret[0].(*azure.Future)
	ret1, _ := ret[1].(*http.Response)
	ret2, _ := ret[2].(*retry.Error)
	return ret0, ret1, ret2
}
 | 
			
		||||
 | 
			
		||||
// SendAsync indicates an expected call of SendAsync.
func (mr *MockInterfaceMockRecorder) SendAsync(ctx, request interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAsync", reflect.TypeOf((*MockInterface)(nil).SendAsync), ctx, request)
}
 | 
			
		||||
 | 
			
		||||
// WaitForAsyncOperationCompletion mocks base method.
func (m *MockInterface) WaitForAsyncOperationCompletion(ctx context.Context, future *azure.Future, asyncOperationName string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "WaitForAsyncOperationCompletion", ctx, future, asyncOperationName)
	// Soft type assertion: a mis-typed stub return yields a nil error.
	ret0, _ := ret[0].(error)
	return ret0
}
 | 
			
		||||
 | 
			
		||||
// WaitForAsyncOperationCompletion indicates an expected call of WaitForAsyncOperationCompletion.
func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationCompletion(ctx, future, asyncOperationName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForAsyncOperationCompletion", reflect.TypeOf((*MockInterface)(nil).WaitForAsyncOperationCompletion), ctx, future, asyncOperationName)
}
 | 
			
		||||
 | 
			
		||||
// WaitForAsyncOperationResult mocks base method.
func (m *MockInterface) WaitForAsyncOperationResult(ctx context.Context, future *azure.Future, asyncOperationName string) (*http.Response, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "WaitForAsyncOperationResult", ctx, future, asyncOperationName)
	// Soft type assertions: a mis-typed stub return yields the zero value.
	ret0, _ := ret[0].(*http.Response)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// WaitForAsyncOperationResult indicates an expected call of WaitForAsyncOperationResult.
func (mr *MockInterfaceMockRecorder) WaitForAsyncOperationResult(ctx, future, asyncOperationName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	// Register the expectation with the controller, keyed by method name and type.
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForAsyncOperationResult", reflect.TypeOf((*MockInterface)(nil).WaitForAsyncOperationResult), ctx, future, asyncOperationName)
}
 | 
			
		||||
@@ -1,82 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package clients
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"k8s.io/client-go/util/flowcontrol"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// ClientConfig contains all essential information to create an Azure client.
type ClientConfig struct {
	// CloudName identifies the Azure cloud environment (e.g. public vs. sovereign clouds).
	CloudName string
	// Location is the Azure region the client operates against.
	Location string
	// SubscriptionID is the Azure subscription the client is scoped to.
	SubscriptionID string
	// ResourceManagerEndpoint is the base URI of the Azure Resource Manager API.
	ResourceManagerEndpoint string
	// Authorizer supplies credentials for outgoing requests.
	Authorizer autorest.Authorizer
	// RateLimitConfig configures client-side rate limiting; nil disables it.
	RateLimitConfig *RateLimitConfig
	// Backoff configures retry behavior for failed requests.
	Backoff *retry.Backoff
	// UserAgent is sent as the HTTP User-Agent header.
	UserAgent string
}
 | 
			
		||||
 | 
			
		||||
// WithRateLimiter returns a new ClientConfig with rateLimitConfig set.
 | 
			
		||||
func (cfg *ClientConfig) WithRateLimiter(rl *RateLimitConfig) *ClientConfig {
 | 
			
		||||
	newClientConfig := *cfg
 | 
			
		||||
	newClientConfig.RateLimitConfig = rl
 | 
			
		||||
	return &newClientConfig
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// RateLimitConfig indicates the rate limit config options.
type RateLimitConfig struct {
	// Enable rate limiting
	CloudProviderRateLimit bool `json:"cloudProviderRateLimit,omitempty" yaml:"cloudProviderRateLimit,omitempty"`
	// Rate limit QPS (Read)
	CloudProviderRateLimitQPS float32 `json:"cloudProviderRateLimitQPS,omitempty" yaml:"cloudProviderRateLimitQPS,omitempty"`
	// Rate limit Bucket Size (Read)
	CloudProviderRateLimitBucket int `json:"cloudProviderRateLimitBucket,omitempty" yaml:"cloudProviderRateLimitBucket,omitempty"`
	// Rate limit QPS (Write)
	CloudProviderRateLimitQPSWrite float32 `json:"cloudProviderRateLimitQPSWrite,omitempty" yaml:"cloudProviderRateLimitQPSWrite,omitempty"`
	// Rate limit Bucket Size (Write)
	CloudProviderRateLimitBucketWrite int `json:"cloudProviderRateLimitBucketWrite,omitempty" yaml:"cloudProviderRateLimitBucketWrite,omitempty"`
}
 | 
			
		||||
 | 
			
		||||
// RateLimitEnabled returns true if CloudProviderRateLimit is set to true.
 | 
			
		||||
func RateLimitEnabled(config *RateLimitConfig) bool {
 | 
			
		||||
	return config != nil && config.CloudProviderRateLimit
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewRateLimiter creates new read and write flowcontrol.RateLimiter from RateLimitConfig.
 | 
			
		||||
func NewRateLimiter(config *RateLimitConfig) (flowcontrol.RateLimiter, flowcontrol.RateLimiter) {
 | 
			
		||||
	readLimiter := flowcontrol.NewFakeAlwaysRateLimiter()
 | 
			
		||||
	writeLimiter := flowcontrol.NewFakeAlwaysRateLimiter()
 | 
			
		||||
 | 
			
		||||
	if config != nil && config.CloudProviderRateLimit {
 | 
			
		||||
		readLimiter = flowcontrol.NewTokenBucketRateLimiter(
 | 
			
		||||
			config.CloudProviderRateLimitQPS,
 | 
			
		||||
			config.CloudProviderRateLimitBucket)
 | 
			
		||||
 | 
			
		||||
		writeLimiter = flowcontrol.NewTokenBucketRateLimiter(
 | 
			
		||||
			config.CloudProviderRateLimitQPSWrite,
 | 
			
		||||
			config.CloudProviderRateLimitBucketWrite)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return readLimiter, writeLimiter
 | 
			
		||||
}
 | 
			
		||||
@@ -1,69 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2019 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package clients
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"testing"
 | 
			
		||||
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
	"k8s.io/client-go/util/flowcontrol"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
func TestWithRateLimiter(t *testing.T) {
 | 
			
		||||
	config := &ClientConfig{}
 | 
			
		||||
	assert.Nil(t, config.RateLimitConfig)
 | 
			
		||||
	c := config.WithRateLimiter(&RateLimitConfig{CloudProviderRateLimit: true})
 | 
			
		||||
	assert.Equal(t, &RateLimitConfig{CloudProviderRateLimit: true}, c.RateLimitConfig)
 | 
			
		||||
	config.WithRateLimiter(nil)
 | 
			
		||||
	assert.Nil(t, config.RateLimitConfig)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestRateLimitEnabled(t *testing.T) {
 | 
			
		||||
	assert.Equal(t, false, RateLimitEnabled(nil))
 | 
			
		||||
	config := &RateLimitConfig{}
 | 
			
		||||
	assert.Equal(t, false, RateLimitEnabled(config))
 | 
			
		||||
	config.CloudProviderRateLimit = true
 | 
			
		||||
	assert.Equal(t, true, RateLimitEnabled(config))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestNewRateLimiter verifies that NewRateLimiter returns always-accept fake
// limiters when rate limiting is disabled (nil config or CloudProviderRateLimit
// false) and token-bucket limiters with the configured QPS/bucket sizes when
// it is enabled.
func TestNewRateLimiter(t *testing.T) {
	fakeRateLimiter := flowcontrol.NewFakeAlwaysRateLimiter()
	// nil config: both limiters fall back to the fake always-accept limiter.
	readLimiter, writeLimiter := NewRateLimiter(nil)
	assert.Equal(t, readLimiter, fakeRateLimiter)
	assert.Equal(t, writeLimiter, fakeRateLimiter)

	// Rate limiting explicitly disabled: same fallback.
	rateLimitConfig := &RateLimitConfig{
		CloudProviderRateLimit: false,
	}
	readLimiter, writeLimiter = NewRateLimiter(rateLimitConfig)
	assert.Equal(t, readLimiter, fakeRateLimiter)
	assert.Equal(t, writeLimiter, fakeRateLimiter)

	// Rate limiting enabled: separate read/write token buckets.
	rateLimitConfig = &RateLimitConfig{
		CloudProviderRateLimit:            true,
		CloudProviderRateLimitQPS:         3,
		CloudProviderRateLimitBucket:      10,
		CloudProviderRateLimitQPSWrite:    1,
		CloudProviderRateLimitBucketWrite: 3,
	}
	readLimiter, writeLimiter = NewRateLimiter(rateLimitConfig)
	assert.Equal(t, flowcontrol.NewTokenBucketRateLimiter(3, 10), readLimiter)
	assert.Equal(t, flowcontrol.NewTokenBucketRateLimiter(1, 3), writeLimiter)
}
 | 
			
		||||
@@ -1,419 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package containerserviceclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/util/flowcontrol"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/metrics"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Compile-time assertion that *Client satisfies the package's Interface.
var _ Interface = &Client{}

// Client implements ContainerService client Interface.
type Client struct {
	armClient      armclient.Interface
	subscriptionID string

	// Rate limiting configures.
	// Client-side limiters applied before any request is sent (read vs. write ops).
	rateLimiterReader flowcontrol.RateLimiter
	rateLimiterWriter flowcontrol.RateLimiter

	// ARM throttling configures.
	// Requests are suppressed until these deadlines when ARM returns Retry-After.
	RetryAfterReader time.Time
	RetryAfterWriter time.Time
}
 | 
			
		||||
 | 
			
		||||
// New creates a new ContainerServiceClient client with ratelimiting.
 | 
			
		||||
func New(config *azclients.ClientConfig) *Client {
 | 
			
		||||
	baseURI := config.ResourceManagerEndpoint
 | 
			
		||||
	authorizer := config.Authorizer
 | 
			
		||||
	armClient := armclient.New(authorizer, baseURI, config.UserAgent, APIVersion, config.Location, config.Backoff)
 | 
			
		||||
	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("Azure ContainerServiceClient (read ops) using rate limit config: QPS=%g, bucket=%d",
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitQPS,
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitBucket)
 | 
			
		||||
	klog.V(2).Infof("Azure ContainerServiceClient (write ops) using rate limit config: QPS=%g, bucket=%d",
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
 | 
			
		||||
 | 
			
		||||
	client := &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
		subscriptionID:    config.SubscriptionID,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return client
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get gets a ManagedCluster.
 | 
			
		||||
func (c *Client) Get(ctx context.Context, resourceGroupName string, managedClusterName string) (containerservice.ManagedCluster, *retry.Error) {
 | 
			
		||||
	mc := metrics.NewMetricContext("managed_clusters", "get", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterReader.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return containerservice.ManagedCluster{}, retry.GetRateLimitError(false, "GetManagedCluster")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterReader.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("GetManagedCluster", "client throttled", c.RetryAfterReader)
 | 
			
		||||
		return containerservice.ManagedCluster{}, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result, rerr := c.getManagedCluster(ctx, resourceGroupName, managedClusterName)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterReader = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getManagedCluster gets a ManagedCluster.
// It issues the GET against ARM, then unmarshals the JSON body into a
// containerservice.ManagedCluster.
func (c *Client) getManagedCluster(ctx context.Context, resourceGroupName string, managedClusterName string) (containerservice.ManagedCluster, *retry.Error) {
	resourceID := armclient.GetResourceID(
		c.subscriptionID,
		resourceGroupName,
		"Microsoft.ContainerService/managedClusters",
		managedClusterName,
	)
	result := containerservice.ManagedCluster{}

	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
	// NOTE(review): deferred before the error check — presumably CloseResponse
	// tolerates a nil response; confirm against the armclient implementation.
	defer c.armClient.CloseResponse(ctx, response)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "managedcluster.get.request", resourceID, rerr.Error())
		return result, rerr
	}

	// Accept only 200 OK and decode the payload into result.
	err := autorest.Respond(
		response,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&result))
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "managedcluster.get.respond", resourceID, err)
		return result, retry.GetError(response, err)
	}

	// Attach the raw HTTP response for callers that need it.
	result.Response = autorest.Response{Response: response}
	return result, nil
}
 | 
			
		||||
 | 
			
		||||
// List gets a list of ManagedClusters in the resource group.
 | 
			
		||||
func (c *Client) List(ctx context.Context, resourceGroupName string) ([]containerservice.ManagedCluster, *retry.Error) {
 | 
			
		||||
	mc := metrics.NewMetricContext("managed_clusters", "list", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterReader.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return nil, retry.GetRateLimitError(false, "ListManagedCluster")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterReader.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("ListManagedCluster", "client throttled", c.RetryAfterReader)
 | 
			
		||||
		return nil, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result, rerr := c.listManagedCluster(ctx, resourceGroupName)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterReader = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// listManagedCluster gets a list of ManagedClusters in the resource group.
// It fetches the first page, then follows NextLink pages until exhausted,
// accumulating every ManagedCluster into a single slice.
func (c *Client) listManagedCluster(ctx context.Context, resourceGroupName string) ([]containerservice.ManagedCluster, *retry.Error) {
	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.ContainerService/managedClusters",
		autorest.Encode("path", c.subscriptionID),
		autorest.Encode("path", resourceGroupName))
	result := make([]containerservice.ManagedCluster, 0)
	// page.fn is the continuation used by NextWithContext to fetch later pages.
	page := &ManagedClusterResultPage{}
	page.fn = c.listNextResults

	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
	// NOTE(review): deferred before the error check — presumably CloseResponse
	// tolerates a nil response; confirm against the armclient implementation.
	defer c.armClient.CloseResponse(ctx, resp)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "managedcluster.list.request", resourceID, rerr.Error())
		return result, rerr
	}

	var err error
	page.mclr, err = c.listResponder(resp)
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "managedcluster.list.respond", resourceID, err)
		return result, retry.GetError(resp, err)
	}

	// Drain all pages; partial results are returned alongside any paging error.
	for {
		result = append(result, page.Values()...)

		// Abort the loop when there's no nextLink in the response.
		if pointer.StringDeref(page.Response().NextLink, "") == "" {
			break
		}

		if err = page.NextWithContext(ctx); err != nil {
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "managedcluster.list.next", resourceID, err)
			return result, retry.GetError(page.Response().Response.Response, err)
		}
	}

	return result, nil
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) listResponder(resp *http.Response) (result containerservice.ManagedClusterListResult, err error) {
 | 
			
		||||
	err = autorest.Respond(
 | 
			
		||||
		resp,
 | 
			
		||||
		autorest.ByIgnoring(),
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	result.Response = autorest.Response{Response: resp}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// managedClusterListResultPreparer prepares a request to retrieve the next set of results.
 | 
			
		||||
// It returns nil if no more results exist.
 | 
			
		||||
func (c *Client) managedClusterListResultPreparer(ctx context.Context, mclr containerservice.ManagedClusterListResult) (*http.Request, error) {
 | 
			
		||||
	if mclr.NextLink == nil || len(pointer.StringDeref(mclr.NextLink, "")) < 1 {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithBaseURL(pointer.StringDeref(mclr.NextLink, "")),
 | 
			
		||||
	}
 | 
			
		||||
	return c.armClient.PrepareGetRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// listNextResults retrieves the next set of results, if any.
// It returns the zero ManagedClusterListResult (with req == nil) when the
// previous page had no NextLink.
func (c *Client) listNextResults(ctx context.Context, lastResults containerservice.ManagedClusterListResult) (result containerservice.ManagedClusterListResult, err error) {
	req, err := c.managedClusterListResultPreparer(ctx, lastResults)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "managedclusterclient", "listNextResults", nil, "Failure preparing next results request")
	}
	// A nil request with a nil error means there is no next page.
	if req == nil {
		return
	}

	resp, rerr := c.armClient.Send(ctx, req)
	// CloseResponse is nil-safe; deferring before the error check ensures the
	// body is closed on every path below.
	defer c.armClient.CloseResponse(ctx, resp)
	if rerr != nil {
		// Attach the raw response so callers can still inspect status/headers.
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(rerr.Error(), "managedclusterclient", "listNextResults", resp, "Failure sending next results request")
	}

	result, err = c.listResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "managedclusterclient", "listNextResults", resp, "Failure responding to next results request")
	}

	return
}
 | 
			
		||||
 | 
			
		||||
// ManagedClusterResultPage contains a page of ManagedCluster values.
type ManagedClusterResultPage struct {
	// fn fetches the page following the given one (typically listNextResults).
	fn func(context.Context, containerservice.ManagedClusterListResult) (containerservice.ManagedClusterListResult, error)
	// mclr holds the current page of results.
	mclr containerservice.ManagedClusterListResult
}
 | 
			
		||||
 | 
			
		||||
// NextWithContext advances to the next page of values.  If there was an error making
 | 
			
		||||
// the request the page does not advance and the error is returned.
 | 
			
		||||
func (page *ManagedClusterResultPage) NextWithContext(ctx context.Context) (err error) {
 | 
			
		||||
	next, err := page.fn(ctx, page.mclr)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	page.mclr = next
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Next advances to the next page of values.  If there was an error making
// the request the page does not advance and the error is returned.
// It delegates to NextWithContext with a background context.
// Deprecated: Use NextWithContext() instead.
func (page *ManagedClusterResultPage) Next() error {
	return page.NextWithContext(context.Background())
}
 | 
			
		||||
 | 
			
		||||
// NotDone returns true if the page enumeration should be started or is not yet complete.
 | 
			
		||||
func (page ManagedClusterResultPage) NotDone() bool {
 | 
			
		||||
	return !page.mclr.IsEmpty()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Response returns the raw server response from the last page request.
func (page ManagedClusterResultPage) Response() containerservice.ManagedClusterListResult {
	return page.mclr
}
 | 
			
		||||
 | 
			
		||||
// Values returns the slice of values for the current page or nil if there are no values.
 | 
			
		||||
func (page ManagedClusterResultPage) Values() []containerservice.ManagedCluster {
 | 
			
		||||
	if page.mclr.IsEmpty() {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	return *page.mclr.Value
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdate creates or updates a ManagedCluster.
// It enforces client-side write rate limiting and throttling before
// delegating to createOrUpdateManagedCluster, and records metrics for the call.
func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, managedClusterName string, parameters containerservice.ManagedCluster, etag string) *retry.Error {
	mc := metrics.NewMetricContext("managed_clusters", "create_or_update", resourceGroupName, c.subscriptionID, "")

	// Report errors if the client is rate limited.
	if !c.rateLimiterWriter.TryAccept() {
		mc.RateLimitedCount()
		return retry.GetRateLimitError(true, "CreateOrUpdateManagedCluster")
	}

	// Report errors if the client is throttled (a previous call set a
	// retry-after deadline that has not yet passed).
	if c.RetryAfterWriter.After(time.Now()) {
		mc.ThrottledCount()
		rerr := retry.GetThrottlingError("CreateOrUpdateManagedCluster", "client throttled", c.RetryAfterWriter)
		return rerr
	}

	rerr := c.createOrUpdateManagedCluster(ctx, resourceGroupName, managedClusterName, parameters, etag)
	// NOTE: rerr may be nil here; (*retry.Error).Error() is nil-safe in this
	// package, so observing before the nil check is intentional.
	mc.Observe(rerr.Error())
	if rerr != nil {
		if rerr.IsThrottled() {
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
			c.RetryAfterWriter = rerr.RetryAfter
		}

		return rerr
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// createOrUpdateManagedCluster creates or updates a ManagedCluster.
// It issues a PUT against the cluster's ARM resource ID; when etag is
// non-empty, an If-Match header is added for optimistic concurrency.
func (c *Client) createOrUpdateManagedCluster(ctx context.Context, resourceGroupName string, managedClusterName string, parameters containerservice.ManagedCluster, etag string) *retry.Error {
	resourceID := armclient.GetResourceID(
		c.subscriptionID,
		resourceGroupName,
		"Microsoft.ContainerService/managedClusters",
		managedClusterName,
	)
	decorators := []autorest.PrepareDecorator{
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
		autorest.WithJSON(parameters),
	}
	if etag != "" {
		// Conditional update: fail if the server-side resource has changed.
		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
	}

	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, parameters, decorators)
	// CloseResponse is nil-safe; defer before the error check so the body is
	// closed on every path.
	defer c.armClient.CloseResponse(ctx, response)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "managedCluster.put.request", resourceID, rerr.Error())
		return rerr
	}

	// 204 No Content carries no body to decode; anything else is validated
	// and unmarshalled (the decoded cluster itself is discarded here).
	if response != nil && response.StatusCode != http.StatusNoContent {
		_, rerr = c.createOrUpdateResponder(response)
		if rerr != nil {
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "managedCluster.put.respond", resourceID, rerr.Error())
			return rerr
		}
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) createOrUpdateResponder(resp *http.Response) (*containerservice.ManagedCluster, *retry.Error) {
 | 
			
		||||
	result := &containerservice.ManagedCluster{}
 | 
			
		||||
	err := autorest.Respond(
 | 
			
		||||
		resp,
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	result.Response = autorest.Response{Response: resp}
 | 
			
		||||
	return result, retry.GetError(resp, err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Delete deletes a ManagedCluster by name.
// It enforces client-side write rate limiting and throttling before
// delegating to deleteManagedCluster, and records metrics for the call.
func (c *Client) Delete(ctx context.Context, resourceGroupName string, managedClusterName string) *retry.Error {
	mc := metrics.NewMetricContext("managed_clusters", "delete", resourceGroupName, c.subscriptionID, "")

	// Report errors if the client is rate limited.
	if !c.rateLimiterWriter.TryAccept() {
		mc.RateLimitedCount()
		return retry.GetRateLimitError(true, "DeleteManagedCluster")
	}

	// Report errors if the client is throttled (a previous call set a
	// retry-after deadline that has not yet passed).
	if c.RetryAfterWriter.After(time.Now()) {
		mc.ThrottledCount()
		rerr := retry.GetThrottlingError("DeleteManagedCluster", "client throttled", c.RetryAfterWriter)
		return rerr
	}

	rerr := c.deleteManagedCluster(ctx, resourceGroupName, managedClusterName)
	// NOTE: rerr may be nil here; (*retry.Error).Error() is nil-safe in this
	// package, so observing before the nil check is intentional.
	mc.Observe(rerr.Error())
	if rerr != nil {
		if rerr.IsThrottled() {
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
			c.RetryAfterWriter = rerr.RetryAfter
		}

		return rerr
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// deleteManagedCluster deletes a ManagedCluster by name.
 | 
			
		||||
func (c *Client) deleteManagedCluster(ctx context.Context, resourceGroupName string, managedClusterName string) *retry.Error {
 | 
			
		||||
	resourceID := armclient.GetResourceID(
 | 
			
		||||
		c.subscriptionID,
 | 
			
		||||
		resourceGroupName,
 | 
			
		||||
		"Microsoft.ContainerService/managedClusters",
 | 
			
		||||
		managedClusterName,
 | 
			
		||||
	)
 | 
			
		||||
 | 
			
		||||
	return c.armClient.DeleteResource(ctx, resourceID, "")
 | 
			
		||||
}
 | 
			
		||||
@@ -1,634 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package containerserviceclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"context"
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/util/flowcontrol"
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// 2065-01-24 05:20:00 +0000 UTC
 | 
			
		||||
func getFutureTime() time.Time {
 | 
			
		||||
	return time.Unix(3000000000, 0)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestManagedClusterClient(armClient armclient.Interface) *Client {
 | 
			
		||||
	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(&azclients.RateLimitConfig{})
 | 
			
		||||
	return &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		subscriptionID:    "subscriptionID",
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestManagedClusterClientWithNeverRateLimiter(armClient armclient.Interface) *Client {
 | 
			
		||||
	rateLimiterReader := flowcontrol.NewFakeNeverRateLimiter()
 | 
			
		||||
	rateLimiterWriter := flowcontrol.NewFakeNeverRateLimiter()
 | 
			
		||||
	return &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		subscriptionID:    "subscriptionID",
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestManagedClusterClientWithRetryAfterReader(armClient armclient.Interface) *Client {
 | 
			
		||||
	rateLimiterReader := flowcontrol.NewFakeAlwaysRateLimiter()
 | 
			
		||||
	rateLimiterWriter := flowcontrol.NewFakeAlwaysRateLimiter()
 | 
			
		||||
	return &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		subscriptionID:    "subscriptionID",
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
		RetryAfterReader:  getFutureTime(),
 | 
			
		||||
		RetryAfterWriter:  getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestManagedCluster(name string) containerservice.ManagedCluster {
 | 
			
		||||
	return containerservice.ManagedCluster{
 | 
			
		||||
		ID:       pointer.String(fmt.Sprintf("/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters/%s", name)),
 | 
			
		||||
		Name:     pointer.String(name),
 | 
			
		||||
		Location: pointer.String("eastus"),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestNew verifies that New wires the subscription ID and constructs both
// rate limiters from the supplied client config.
func TestNew(t *testing.T) {
	config := &azclients.ClientConfig{
		SubscriptionID:          "sub",
		ResourceManagerEndpoint: "endpoint",
		Location:                "eastus",
		RateLimitConfig: &azclients.RateLimitConfig{
			CloudProviderRateLimit:            true,
			CloudProviderRateLimitQPS:         0.5,
			CloudProviderRateLimitBucket:      1,
			CloudProviderRateLimitQPSWrite:    0.5,
			CloudProviderRateLimitBucketWrite: 1,
		},
		Backoff: &retry.Backoff{Steps: 1},
	}

	mcClient := New(config)
	assert.Equal(t, "sub", mcClient.subscriptionID)
	assert.NotEmpty(t, mcClient.rateLimiterReader)
	assert.NotEmpty(t, mcClient.rateLimiterWriter)
}
 | 
			
		||||
 | 
			
		||||
// TestGet verifies the happy path: a 200 response with an empty JSON body
// yields a zero ManagedCluster carrying the raw response and a nil error.
func TestGet(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters/cluster"
	response := &http.Response{
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}

	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	expected := containerservice.ManagedCluster{}
	expected.Response = autorest.Response{Response: response}
	mcClient := getTestManagedClusterClient(armClient)
	result, rerr := mcClient.Get(context.TODO(), "rg", "cluster")
	assert.Equal(t, expected, result)
	assert.Nil(t, rerr)
}
 | 
			
		||||
 | 
			
		||||
func TestGetNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mcGetErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "read", "GetManagedCluster"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	expected := containerservice.ManagedCluster{}
 | 
			
		||||
	result, rerr := mcClient.Get(context.TODO(), "rg", "cluster")
 | 
			
		||||
	assert.Equal(t, expected, result)
 | 
			
		||||
	assert.Equal(t, mcGetErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestGetRetryAfterReader verifies that Get returns a throttling error when
// the client's RetryAfterReader deadline lies in the future; the ARM client
// must not be called.
func TestGetRetryAfterReader(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mcGetErr := &retry.Error{
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "GetManagedCluster", "client throttled"),
		Retriable:  true,
		RetryAfter: getFutureTime(),
	}

	armClient := mockarmclient.NewMockInterface(ctrl)

	mcClient := getTestManagedClusterClientWithRetryAfterReader(armClient)
	expected := containerservice.ManagedCluster{}
	result, rerr := mcClient.Get(context.TODO(), "rg", "cluster")
	assert.Equal(t, expected, result)
	assert.Equal(t, mcGetErr, rerr)
}
 | 
			
		||||
 | 
			
		||||
// TestGetThrottle verifies that a 429 error returned by the ARM client is
// propagated unchanged by Get, with an empty result.
func TestGetThrottle(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters/cluster"
	response := &http.Response{
		StatusCode: http.StatusTooManyRequests,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	throttleErr := &retry.Error{
		HTTPStatusCode: http.StatusTooManyRequests,
		RawError:       fmt.Errorf("error"),
		Retriable:      true,
		RetryAfter:     time.Unix(100, 0),
	}
	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, throttleErr).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	mcClient := getTestManagedClusterClient(armClient)
	result, rerr := mcClient.Get(context.TODO(), "rg", "cluster")
	assert.Empty(t, result)
	assert.Equal(t, throttleErr, rerr)
}
 | 
			
		||||
 | 
			
		||||
// TestGetNotFound verifies that a 404 response makes Get return an empty
// cluster and a retry error carrying the 404 status code.
func TestGetNotFound(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters/cluster"
	response := &http.Response{
		StatusCode: http.StatusNotFound,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	mcClient := getTestManagedClusterClient(armClient)
	expected := containerservice.ManagedCluster{Response: autorest.Response{}}
	result, rerr := mcClient.Get(context.TODO(), "rg", "cluster")
	assert.Equal(t, expected, result)
	assert.NotNil(t, rerr)
	assert.Equal(t, http.StatusNotFound, rerr.HTTPStatusCode)
}
 | 
			
		||||
 | 
			
		||||
// TestGetInternalError verifies that a 500 response makes Get return an empty
// cluster and a retry error carrying the 500 status code.
func TestGetInternalError(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters/cluster"
	response := &http.Response{
		StatusCode: http.StatusInternalServerError,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	mcClient := getTestManagedClusterClient(armClient)
	expected := containerservice.ManagedCluster{Response: autorest.Response{}}
	result, rerr := mcClient.Get(context.TODO(), "rg", "cluster")
	assert.Equal(t, expected, result)
	assert.NotNil(t, rerr)
	assert.Equal(t, http.StatusInternalServerError, rerr.HTTPStatusCode)
}
 | 
			
		||||
 | 
			
		||||
// TestList verifies that a single-page 200 response is decoded into the full
// list of clusters with no error.
func TestList(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters"
	armClient := mockarmclient.NewMockInterface(ctrl)
	mcList := []containerservice.ManagedCluster{getTestManagedCluster("cluster"), getTestManagedCluster("cluster1"), getTestManagedCluster("cluster2")}
	responseBody, err := json.Marshal(containerservice.ManagedClusterListResult{Value: &mcList})
	assert.NoError(t, err)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(
		&http.Response{
			StatusCode: http.StatusOK,
			Body:       ioutil.NopCloser(bytes.NewReader(responseBody)),
		}, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	mcClient := getTestManagedClusterClient(armClient)
	result, rerr := mcClient.List(context.TODO(), "rg")
	assert.Nil(t, rerr)
	assert.Equal(t, 3, len(result))
}
 | 
			
		||||
 | 
			
		||||
// TestListNextResultsMultiPages table-tests listNextResults: success, a
// request-preparation failure, and a send failure. On a prepare error the
// result is empty; otherwise the (possibly partial) result is populated.
func TestListNextResultsMultiPages(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	tests := []struct {
		prepareErr error
		sendErr    *retry.Error
		statusCode int // unused; all mocked responses return 200
	}{
		{
			prepareErr: nil,
			sendErr:    nil,
		},
		{
			prepareErr: fmt.Errorf("error"),
		},
		{
			sendErr: &retry.Error{RawError: fmt.Errorf("error")},
		},
	}

	// A non-empty NextLink forces listNextResults to actually issue a request.
	lastResult := containerservice.ManagedClusterListResult{
		NextLink: pointer.String("next"),
	}

	for _, test := range tests {
		armClient := mockarmclient.NewMockInterface(ctrl)
		req := &http.Request{
			Method: "GET",
		}
		armClient.EXPECT().PrepareGetRequest(gomock.Any(), gomock.Any()).Return(req, test.prepareErr)
		// Send/CloseResponse are only reached when preparation succeeded.
		if test.prepareErr == nil {
			armClient.EXPECT().Send(gomock.Any(), req).Return(&http.Response{
				StatusCode: http.StatusOK,
				Body:       ioutil.NopCloser(bytes.NewReader([]byte(`{"foo":"bar"}`))),
			}, test.sendErr)
			armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any())
		}

		mcClient := getTestManagedClusterClient(armClient)
		result, err := mcClient.listNextResults(context.TODO(), lastResult)
		if test.prepareErr != nil || test.sendErr != nil {
			assert.Error(t, err)
		} else {
			assert.NoError(t, err)
		}
		if test.prepareErr != nil {
			assert.Empty(t, result)
		} else {
			assert.NotEmpty(t, result)
		}
	}
}
 | 
			
		||||
 | 
			
		||||
// TestListNextResultsMultiPagesWithListResponderError verifies that when the
// next-page request succeeds at transport level but the responder rejects the
// status (404), listNextResults returns an error together with a result that
// still carries the raw response.
func TestListNextResultsMultiPagesWithListResponderError(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	test := struct {
		prepareErr error
		sendErr    *retry.Error
	}{
		prepareErr: nil,
		sendErr:    nil,
	}

	// A non-empty NextLink forces listNextResults to issue a request.
	lastResult := containerservice.ManagedClusterListResult{
		NextLink: pointer.String("next"),
	}

	armClient := mockarmclient.NewMockInterface(ctrl)
	req := &http.Request{
		Method: "GET",
	}
	armClient.EXPECT().PrepareGetRequest(gomock.Any(), gomock.Any()).Return(req, test.prepareErr)
	if test.prepareErr == nil {
		armClient.EXPECT().Send(gomock.Any(), req).Return(&http.Response{
			StatusCode: http.StatusNotFound,
			Body:       ioutil.NopCloser(bytes.NewReader([]byte(`{"foo":"bar"}`))),
		}, test.sendErr)
		armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any())
	}

	// Expected result: zero list, but Response populated with the 404 reply.
	response := &http.Response{
		StatusCode: http.StatusNotFound,
		Body:       ioutil.NopCloser(bytes.NewBuffer([]byte(`{"foo":"bar"}`))),
	}
	expected := containerservice.ManagedClusterListResult{}
	expected.Response = autorest.Response{Response: response}
	mcClient := getTestManagedClusterClient(armClient)
	result, err := mcClient.listNextResults(context.TODO(), lastResult)
	assert.Error(t, err)
	assert.Equal(t, expected, result)
}
 | 
			
		||||
 | 
			
		||||
// TestListWithListResponderError verifies that List returns an error and no
// results when the server replies 404 to the initial list request.
func TestListWithListResponderError(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters"
	armClient := mockarmclient.NewMockInterface(ctrl)
	mcList := []containerservice.ManagedCluster{getTestManagedCluster("cluster"), getTestManagedCluster("cluster1"), getTestManagedCluster("cluster2")}
	responseBody, err := json.Marshal(containerservice.ManagedClusterListResult{Value: &mcList})
	assert.NoError(t, err)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(
		&http.Response{
			StatusCode: http.StatusNotFound,
			Body:       ioutil.NopCloser(bytes.NewReader(responseBody)),
		}, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
	mcClient := getTestManagedClusterClient(armClient)
	result, rerr := mcClient.List(context.TODO(), "rg")
	assert.NotNil(t, rerr)
	assert.Equal(t, 0, len(result))
}
 | 
			
		||||
 | 
			
		||||
// TestListWithNextPage verifies that List follows a nextLink: the first page
// (3 clusters + nextLink) plus the second page (3 clusters, no nextLink)
// yield 6 results in total.
func TestListWithNextPage(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters"
	armClient := mockarmclient.NewMockInterface(ctrl)
	mcList := []containerservice.ManagedCluster{getTestManagedCluster("cluster"), getTestManagedCluster("cluster1"), getTestManagedCluster("cluster2")}
	// ManagedClusterListResult.MarshalJson() doesn't include "nextLink" in its result, hence responseBody is composed manually below.
	responseBody, err := json.Marshal(map[string]interface{}{"value": mcList, "nextLink": "nextLink"})
	assert.NoError(t, err)

	// Second page: same clusters, no nextLink, so pagination stops.
	pagedResponse, err := json.Marshal(containerservice.ManagedClusterListResult{Value: &mcList})
	assert.NoError(t, err)
	armClient.EXPECT().PrepareGetRequest(gomock.Any(), gomock.Any()).Return(&http.Request{}, nil)
	armClient.EXPECT().Send(gomock.Any(), gomock.Any()).Return(
		&http.Response{
			StatusCode: http.StatusOK,
			Body:       ioutil.NopCloser(bytes.NewReader(pagedResponse)),
		}, nil)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(
		&http.Response{
			StatusCode: http.StatusOK,
			Body:       ioutil.NopCloser(bytes.NewReader(responseBody)),
		}, nil).Times(1)
	// One CloseResponse per HTTP round trip (initial GET + next-page Send).
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(2)
	mcClient := getTestManagedClusterClient(armClient)
	result, rerr := mcClient.List(context.TODO(), "rg")
	assert.Nil(t, rerr)
	assert.Equal(t, 6, len(result))
}
 | 
			
		||||
 | 
			
		||||
func TestListNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mcListErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "read", "ListManagedCluster"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	mcClient := getTestManagedClusterClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	result, rerr := mcClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.Equal(t, 0, len(result))
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, mcListErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestListRetryAfterReader verifies that List returns a throttling error when
// the client's RetryAfterReader deadline lies in the future; the ARM client
// must not be called.
func TestListRetryAfterReader(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mcListErr := &retry.Error{
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "ListManagedCluster", "client throttled"),
		Retriable:  true,
		RetryAfter: getFutureTime(),
	}

	armClient := mockarmclient.NewMockInterface(ctrl)
	mcClient := getTestManagedClusterClientWithRetryAfterReader(armClient)
	result, rerr := mcClient.List(context.TODO(), "rg")
	assert.Equal(t, 0, len(result))
	assert.NotNil(t, rerr)
	assert.Equal(t, mcListErr, rerr)
}
 | 
			
		||||
 | 
			
		||||
func TestListThrottle(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.ContainerService/managedClusters"
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
	throttleErr := &retry.Error{
 | 
			
		||||
		HTTPStatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		RawError:       fmt.Errorf("error"),
 | 
			
		||||
		Retriable:      true,
 | 
			
		||||
		RetryAfter:     time.Unix(100, 0),
 | 
			
		||||
	}
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, throttleErr).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClient(armClient)
 | 
			
		||||
	result, rerr := mcClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.Empty(t, result)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, throttleErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdate(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mc := getTestManagedCluster("cluster")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusOK,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
 | 
			
		||||
	}
 | 
			
		||||
	armClient.EXPECT().PutResourceWithDecorators(gomock.Any(), pointer.StringDeref(mc.ID, ""), mc, gomock.Any()).Return(response, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClient(armClient)
 | 
			
		||||
	rerr := mcClient.CreateOrUpdate(context.TODO(), "rg", "cluster", mc, "*")
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateWithCreateOrUpdateResponderError(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
	mc := getTestManagedCluster("cluster")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusNotFound,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
 | 
			
		||||
	}
 | 
			
		||||
	armClient.EXPECT().PutResourceWithDecorators(gomock.Any(), pointer.StringDeref(mc.ID, ""), mc, gomock.Any()).Return(response, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClient(armClient)
 | 
			
		||||
	rerr := mcClient.CreateOrUpdate(context.TODO(), "rg", "cluster", mc, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mcCreateOrUpdateErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "CreateOrUpdateManagedCluster"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	mc := getTestManagedCluster("cluster")
 | 
			
		||||
	rerr := mcClient.CreateOrUpdate(context.TODO(), "rg", "cluster", mc, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, mcCreateOrUpdateErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateRetryAfterReader(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mcCreateOrUpdateErr := &retry.Error{
 | 
			
		||||
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "CreateOrUpdateManagedCluster", "client throttled"),
 | 
			
		||||
		Retriable:  true,
 | 
			
		||||
		RetryAfter: getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	mc := getTestManagedCluster("cluster")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClientWithRetryAfterReader(armClient)
 | 
			
		||||
	rerr := mcClient.CreateOrUpdate(context.TODO(), "rg", "cluster", mc, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, mcCreateOrUpdateErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateThrottle(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
	throttleErr := &retry.Error{
 | 
			
		||||
		HTTPStatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		RawError:       fmt.Errorf("error"),
 | 
			
		||||
		Retriable:      true,
 | 
			
		||||
		RetryAfter:     time.Unix(100, 0),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	mc := getTestManagedCluster("cluster")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().PutResourceWithDecorators(gomock.Any(), pointer.StringDeref(mc.ID, ""), mc, gomock.Any()).Return(response, throttleErr).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClient(armClient)
 | 
			
		||||
	rerr := mcClient.CreateOrUpdate(context.TODO(), "rg", "cluster", mc, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, throttleErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDelete(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mc := getTestManagedCluster("cluster")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().DeleteResource(gomock.Any(), pointer.StringDeref(mc.ID, ""), "").Return(nil).Times(1)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClient(armClient)
 | 
			
		||||
	rerr := mcClient.Delete(context.TODO(), "rg", "cluster")
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mcDeleteErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "DeleteManagedCluster"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	rerr := mcClient.Delete(context.TODO(), "rg", "cluster")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, mcDeleteErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteRetryAfterReader(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	mcDeleteErr := &retry.Error{
 | 
			
		||||
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "DeleteManagedCluster", "client throttled"),
 | 
			
		||||
		Retriable:  true,
 | 
			
		||||
		RetryAfter: getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClientWithRetryAfterReader(armClient)
 | 
			
		||||
	rerr := mcClient.Delete(context.TODO(), "rg", "cluster")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, mcDeleteErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteThrottle(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	throttleErr := &retry.Error{
 | 
			
		||||
		HTTPStatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		RawError:       fmt.Errorf("error"),
 | 
			
		||||
		Retriable:      true,
 | 
			
		||||
		RetryAfter:     time.Unix(100, 0),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	mc := getTestManagedCluster("cluster")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().DeleteResource(gomock.Any(), pointer.StringDeref(mc.ID, ""), "").Return(throttleErr).Times(1)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestManagedClusterClient(armClient)
 | 
			
		||||
	rerr := mcClient.Delete(context.TODO(), "rg", "cluster")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, throttleErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package containerserviceclient implements the client for azure container service.
 | 
			
		||||
package containerserviceclient // import "k8s.io/legacy-cloud-providers/azure/clients/containerserviceclient"
 | 
			
		||||
@@ -1,42 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
//go:generate mockgen -copyright_file=$BUILD_TAG_FILE -source=interface.go  -destination=mockcontainerserviceclient/interface.go -package=mockcontainerserviceclient Interface
 | 
			
		||||
package containerserviceclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
 | 
			
		||||
	// APIVersion is the API version for containerservice.
 | 
			
		||||
	APIVersion = "2020-04-01"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Interface is the client interface for ContainerService.
 | 
			
		||||
type Interface interface {
 | 
			
		||||
	CreateOrUpdate(ctx context.Context, resourceGroupName string, managedClusterName string, parameters containerservice.ManagedCluster, etag string) *retry.Error
 | 
			
		||||
	Delete(ctx context.Context, resourceGroupName string, managedClusterName string) *retry.Error
 | 
			
		||||
	Get(ctx context.Context, resourceGroupName string, managedClusterName string) (containerservice.ManagedCluster, *retry.Error)
 | 
			
		||||
	List(ctx context.Context, resourceGroupName string) ([]containerservice.ManagedCluster, *retry.Error)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package mockcontainerserviceclient implements the mock client for azure container service.
 | 
			
		||||
package mockcontainerserviceclient // import "k8s.io/legacy-cloud-providers/azure/clients/containerserviceclient/mockcontainerserviceclient"
 | 
			
		||||
@@ -1,114 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Code generated by MockGen. DO NOT EDIT.
 | 
			
		||||
// Source: interface.go
 | 
			
		||||
 | 
			
		||||
// Package mockcontainerserviceclient is a generated GoMock package.
 | 
			
		||||
package mockcontainerserviceclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	context "context"
 | 
			
		||||
	reflect "reflect"
 | 
			
		||||
 | 
			
		||||
	containerservice "github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-04-01/containerservice"
 | 
			
		||||
	gomock "github.com/golang/mock/gomock"
 | 
			
		||||
	retry "k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// MockInterface is a mock of Interface interface.
 | 
			
		||||
type MockInterface struct {
 | 
			
		||||
	ctrl     *gomock.Controller
 | 
			
		||||
	recorder *MockInterfaceMockRecorder
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// MockInterfaceMockRecorder is the mock recorder for MockInterface.
 | 
			
		||||
type MockInterfaceMockRecorder struct {
 | 
			
		||||
	mock *MockInterface
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NewMockInterface creates a new mock instance.
 | 
			
		||||
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
 | 
			
		||||
	mock := &MockInterface{ctrl: ctrl}
 | 
			
		||||
	mock.recorder = &MockInterfaceMockRecorder{mock}
 | 
			
		||||
	return mock
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// EXPECT returns an object that allows the caller to indicate expected use.
 | 
			
		||||
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
 | 
			
		||||
	return m.recorder
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdate mocks base method.
 | 
			
		||||
func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, managedClusterName string, parameters containerservice.ManagedCluster, etag string) *retry.Error {
 | 
			
		||||
	m.ctrl.T.Helper()
 | 
			
		||||
	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, managedClusterName, parameters, etag)
 | 
			
		||||
	ret0, _ := ret[0].(*retry.Error)
 | 
			
		||||
	return ret0
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdate indicates an expected call of CreateOrUpdate.
 | 
			
		||||
func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, managedClusterName, parameters, etag interface{}) *gomock.Call {
 | 
			
		||||
	mr.mock.ctrl.T.Helper()
 | 
			
		||||
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, managedClusterName, parameters, etag)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Delete mocks base method.
 | 
			
		||||
func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, managedClusterName string) *retry.Error {
 | 
			
		||||
	m.ctrl.T.Helper()
 | 
			
		||||
	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, managedClusterName)
 | 
			
		||||
	ret0, _ := ret[0].(*retry.Error)
 | 
			
		||||
	return ret0
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Delete indicates an expected call of Delete.
 | 
			
		||||
func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, managedClusterName interface{}) *gomock.Call {
 | 
			
		||||
	mr.mock.ctrl.T.Helper()
 | 
			
		||||
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, managedClusterName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get mocks base method.
 | 
			
		||||
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, managedClusterName string) (containerservice.ManagedCluster, *retry.Error) {
 | 
			
		||||
	m.ctrl.T.Helper()
 | 
			
		||||
	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, managedClusterName)
 | 
			
		||||
	ret0, _ := ret[0].(containerservice.ManagedCluster)
 | 
			
		||||
	ret1, _ := ret[1].(*retry.Error)
 | 
			
		||||
	return ret0, ret1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get indicates an expected call of Get.
 | 
			
		||||
func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, managedClusterName interface{}) *gomock.Call {
 | 
			
		||||
	mr.mock.ctrl.T.Helper()
 | 
			
		||||
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, managedClusterName)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// List mocks base method.
 | 
			
		||||
func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]containerservice.ManagedCluster, *retry.Error) {
 | 
			
		||||
	m.ctrl.T.Helper()
 | 
			
		||||
	ret := m.ctrl.Call(m, "List", ctx, resourceGroupName)
 | 
			
		||||
	ret0, _ := ret[0].([]containerservice.ManagedCluster)
 | 
			
		||||
	ret1, _ := ret[1].(*retry.Error)
 | 
			
		||||
	return ret0, ret1
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// List indicates an expected call of List.
 | 
			
		||||
func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call {
 | 
			
		||||
	mr.mock.ctrl.T.Helper()
 | 
			
		||||
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,459 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package deploymentclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/util/flowcontrol"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/metrics"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
var _ Interface = &Client{}
 | 
			
		||||
 | 
			
		||||
// Client implements ContainerService client Interface.
 | 
			
		||||
type Client struct {
 | 
			
		||||
	armClient      armclient.Interface
 | 
			
		||||
	subscriptionID string
 | 
			
		||||
 | 
			
		||||
	// Rate limiting configures.
 | 
			
		||||
	rateLimiterReader flowcontrol.RateLimiter
 | 
			
		||||
	rateLimiterWriter flowcontrol.RateLimiter
 | 
			
		||||
 | 
			
		||||
	// ARM throttling configures.
 | 
			
		||||
	RetryAfterReader time.Time
 | 
			
		||||
	RetryAfterWriter time.Time
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// New creates a new ContainerServiceClient client with ratelimiting.
 | 
			
		||||
func New(config *azclients.ClientConfig) *Client {
 | 
			
		||||
	baseURI := config.ResourceManagerEndpoint
 | 
			
		||||
	authorizer := config.Authorizer
 | 
			
		||||
	armClient := armclient.New(authorizer, baseURI, config.UserAgent, APIVersion, config.Location, config.Backoff)
 | 
			
		||||
	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("Azure DeploymentClient (read ops) using rate limit config: QPS=%g, bucket=%d",
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitQPS,
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitBucket)
 | 
			
		||||
	klog.V(2).Infof("Azure DeploymentClient (write ops) using rate limit config: QPS=%g, bucket=%d",
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
 | 
			
		||||
 | 
			
		||||
	client := &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
		subscriptionID:    config.SubscriptionID,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return client
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get gets a deployment
 | 
			
		||||
func (c *Client) Get(ctx context.Context, resourceGroupName string, deploymentName string) (resources.DeploymentExtended, *retry.Error) {
 | 
			
		||||
	mc := metrics.NewMetricContext("deployments", "get", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterReader.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return resources.DeploymentExtended{}, retry.GetRateLimitError(false, "GetDeployment")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterReader.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("GetDeployment", "client throttled", c.RetryAfterReader)
 | 
			
		||||
		return resources.DeploymentExtended{}, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result, rerr := c.getDeployment(ctx, resourceGroupName, deploymentName)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterReader = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getDeployment gets a deployment.
 | 
			
		||||
func (c *Client) getDeployment(ctx context.Context, resourceGroupName string, deploymentName string) (resources.DeploymentExtended, *retry.Error) {
 | 
			
		||||
	resourceID := armclient.GetResourceID(
 | 
			
		||||
		c.subscriptionID,
 | 
			
		||||
		resourceGroupName,
 | 
			
		||||
		"Microsoft.Resources/deployments",
 | 
			
		||||
		deploymentName,
 | 
			
		||||
	)
 | 
			
		||||
	result := resources.DeploymentExtended{}
 | 
			
		||||
 | 
			
		||||
	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, response)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.get.request", resourceID, rerr.Error())
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	err := autorest.Respond(
 | 
			
		||||
		response,
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.get.respond", resourceID, err)
 | 
			
		||||
		return result, retry.GetError(response, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result.Response = autorest.Response{Response: response}
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// List gets a list of deployments in the resource group.
 | 
			
		||||
func (c *Client) List(ctx context.Context, resourceGroupName string) ([]resources.DeploymentExtended, *retry.Error) {
 | 
			
		||||
	mc := metrics.NewMetricContext("deployments", "list", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterReader.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return nil, retry.GetRateLimitError(false, "ListDeployment")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterReader.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("ListDeployment", "client throttled", c.RetryAfterReader)
 | 
			
		||||
		return nil, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result, rerr := c.listDeployment(ctx, resourceGroupName)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterReader = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// listDeployment gets a list of deployments in the resource group.
 | 
			
		||||
func (c *Client) listDeployment(ctx context.Context, resourceGroupName string) ([]resources.DeploymentExtended, *retry.Error) {
 | 
			
		||||
	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Resources/deployments",
 | 
			
		||||
		autorest.Encode("path", c.subscriptionID),
 | 
			
		||||
		autorest.Encode("path", resourceGroupName))
 | 
			
		||||
	result := make([]resources.DeploymentExtended, 0)
 | 
			
		||||
	page := &DeploymentResultPage{}
 | 
			
		||||
	page.fn = c.listNextResults
 | 
			
		||||
 | 
			
		||||
	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, resp)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.list.request", resourceID, rerr.Error())
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	var err error
 | 
			
		||||
	page.dplr, err = c.listResponder(resp)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.list.respond", resourceID, err)
 | 
			
		||||
		return result, retry.GetError(resp, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for {
 | 
			
		||||
		result = append(result, page.Values()...)
 | 
			
		||||
 | 
			
		||||
		// Abort the loop when there's no nextLink in the response.
 | 
			
		||||
		if pointer.StringDeref(page.Response().NextLink, "") == "" {
 | 
			
		||||
			break
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		if err = page.NextWithContext(ctx); err != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.list.next", resourceID, err)
 | 
			
		||||
			return result, retry.GetError(page.Response().Response.Response, err)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) listResponder(resp *http.Response) (result resources.DeploymentListResult, err error) {
 | 
			
		||||
	err = autorest.Respond(
 | 
			
		||||
		resp,
 | 
			
		||||
		autorest.ByIgnoring(),
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	result.Response = autorest.Response{Response: resp}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// deploymentListResultPreparer prepares a request to retrieve the next set of results.
 | 
			
		||||
// It returns nil if no more results exist.
 | 
			
		||||
func (c *Client) deploymentListResultPreparer(ctx context.Context, dplr resources.DeploymentListResult) (*http.Request, error) {
 | 
			
		||||
	if dplr.NextLink == nil || len(pointer.StringDeref(dplr.NextLink, "")) < 1 {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithBaseURL(pointer.StringDeref(dplr.NextLink, "")),
 | 
			
		||||
	}
 | 
			
		||||
	return c.armClient.PrepareGetRequest(ctx, decorators...)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// listNextResults retrieves the next set of results, if any.
 | 
			
		||||
func (c *Client) listNextResults(ctx context.Context, lastResults resources.DeploymentListResult) (result resources.DeploymentListResult, err error) {
 | 
			
		||||
	req, err := c.deploymentListResultPreparer(ctx, lastResults)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return result, autorest.NewErrorWithError(err, "deploymentclient", "listNextResults", nil, "Failure preparing next results request")
 | 
			
		||||
	}
 | 
			
		||||
	if req == nil {
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	resp, rerr := c.armClient.Send(ctx, req)
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, resp)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		result.Response = autorest.Response{Response: resp}
 | 
			
		||||
		return result, autorest.NewErrorWithError(rerr.Error(), "deploymentclient", "listNextResults", resp, "Failure sending next results request")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result, err = c.listResponder(resp)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		err = autorest.NewErrorWithError(err, "deploymentclient", "listNextResults", resp, "Failure responding to next results request")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DeploymentResultPage contains a page of deployments values.
 | 
			
		||||
type DeploymentResultPage struct {
 | 
			
		||||
	fn   func(context.Context, resources.DeploymentListResult) (resources.DeploymentListResult, error)
 | 
			
		||||
	dplr resources.DeploymentListResult
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NextWithContext advances to the next page of values.  If there was an error making
 | 
			
		||||
// the request the page does not advance and the error is returned.
 | 
			
		||||
func (page *DeploymentResultPage) NextWithContext(ctx context.Context) (err error) {
 | 
			
		||||
	next, err := page.fn(ctx, page.dplr)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	page.dplr = next
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Next advances to the next page of values.  If there was an error making
 | 
			
		||||
// the request the page does not advance and the error is returned.
 | 
			
		||||
// Deprecated: Use NextWithContext() instead.
 | 
			
		||||
func (page *DeploymentResultPage) Next() error {
 | 
			
		||||
	return page.NextWithContext(context.Background())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NotDone returns true if the page enumeration should be started or is not yet complete.
 | 
			
		||||
func (page DeploymentResultPage) NotDone() bool {
 | 
			
		||||
	return !page.dplr.IsEmpty()
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Response returns the raw server response from the last page request.
 | 
			
		||||
func (page DeploymentResultPage) Response() resources.DeploymentListResult {
 | 
			
		||||
	return page.dplr
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Values returns the slice of values for the current page or nil if there are no values.
 | 
			
		||||
func (page DeploymentResultPage) Values() []resources.DeploymentExtended {
 | 
			
		||||
	if page.dplr.IsEmpty() {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	return *page.dplr.Value
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdate creates or updates a deployment.
 | 
			
		||||
func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment, etag string) *retry.Error {
 | 
			
		||||
	mc := metrics.NewMetricContext("deployments", "create_or_update", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterWriter.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return retry.GetRateLimitError(true, "CreateOrUpdateDeployment")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterWriter.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("CreateOrUpdateDeployment", "client throttled", c.RetryAfterWriter)
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rerr := c.createOrUpdateDeployment(ctx, resourceGroupName, deploymentName, parameters, etag)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterWriter = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) createOrUpdateDeployment(ctx context.Context, resourceGroupName string, deploymentName string, parameters resources.Deployment, etag string) *retry.Error {
 | 
			
		||||
	resourceID := armclient.GetResourceID(
 | 
			
		||||
		c.subscriptionID,
 | 
			
		||||
		resourceGroupName,
 | 
			
		||||
		"Microsoft.Resources/deployments",
 | 
			
		||||
		deploymentName,
 | 
			
		||||
	)
 | 
			
		||||
	decorators := []autorest.PrepareDecorator{
 | 
			
		||||
		autorest.WithPathParameters("{resourceID}", map[string]interface{}{"resourceID": resourceID}),
 | 
			
		||||
		autorest.WithJSON(parameters),
 | 
			
		||||
	}
 | 
			
		||||
	if etag != "" {
 | 
			
		||||
		decorators = append(decorators, autorest.WithHeader("If-Match", autorest.String(etag)))
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	response, rerr := c.armClient.PutResourceWithDecorators(ctx, resourceID, parameters, decorators)
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, response)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.put.request", resourceID, rerr.Error())
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if response != nil && response.StatusCode != http.StatusNoContent {
 | 
			
		||||
		_, rerr = c.createOrUpdateResponder(response)
 | 
			
		||||
		if rerr != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.put.respond", resourceID, rerr.Error())
 | 
			
		||||
			return rerr
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) createOrUpdateResponder(resp *http.Response) (*resources.DeploymentExtended, *retry.Error) {
 | 
			
		||||
	result := &resources.DeploymentExtended{}
 | 
			
		||||
	err := autorest.Respond(
 | 
			
		||||
		resp,
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	result.Response = autorest.Response{Response: resp}
 | 
			
		||||
	return result, retry.GetError(resp, err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Delete deletes a deployment by name.
 | 
			
		||||
func (c *Client) Delete(ctx context.Context, resourceGroupName string, deploymentName string) *retry.Error {
 | 
			
		||||
	mc := metrics.NewMetricContext("deployments", "delete", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterWriter.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return retry.GetRateLimitError(true, "DeleteDeployment")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterWriter.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("DeleteDeployment", "client throttled", c.RetryAfterWriter)
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rerr := c.deleteDeployment(ctx, resourceGroupName, deploymentName)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterWriter = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// deleteDeployment deletes a deployment by name.
 | 
			
		||||
func (c *Client) deleteDeployment(ctx context.Context, resourceGroupName string, deploymentName string) *retry.Error {
 | 
			
		||||
	resourceID := armclient.GetResourceID(
 | 
			
		||||
		c.subscriptionID,
 | 
			
		||||
		resourceGroupName,
 | 
			
		||||
		"Microsoft.Resources/deployments",
 | 
			
		||||
		deploymentName,
 | 
			
		||||
	)
 | 
			
		||||
 | 
			
		||||
	return c.armClient.DeleteResource(ctx, resourceID, "")
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// ExportTemplate exports the template used for specified deployment
 | 
			
		||||
func (c *Client) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, rerr *retry.Error) {
 | 
			
		||||
	mc := metrics.NewMetricContext("deployments", "export_template", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterWriter.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return resources.DeploymentExportResult{}, retry.GetRateLimitError(true, "ExportTemplateDeployment")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterWriter.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("CreateOrUpdateDeployment", "client throttled", c.RetryAfterWriter)
 | 
			
		||||
		return resources.DeploymentExportResult{}, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Resources/deployments/%s/exportTemplate",
 | 
			
		||||
		autorest.Encode("path", c.subscriptionID),
 | 
			
		||||
		autorest.Encode("path", resourceGroupName),
 | 
			
		||||
		autorest.Encode("path", deploymentName))
 | 
			
		||||
	response, rerr := c.armClient.PostResource(ctx, resourceID, "exportTemplate", struct{}{})
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, response)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.exportTemplate.request", resourceID, rerr.Error())
 | 
			
		||||
		return
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	err := autorest.Respond(
 | 
			
		||||
		response,
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "deployment.exportTemplate.respond", resourceID, err)
 | 
			
		||||
		return result, retry.GetError(response, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result.Response = autorest.Response{Response: response}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
@@ -1,635 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package deploymentclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"context"
 | 
			
		||||
	"encoding/json"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/util/flowcontrol"
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// 2065-01-24 05:20:00 +0000 UTC
 | 
			
		||||
func getFutureTime() time.Time {
 | 
			
		||||
	return time.Unix(3000000000, 0)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestDeploymentClient(armClient armclient.Interface) *Client {
 | 
			
		||||
	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(&azclients.RateLimitConfig{})
 | 
			
		||||
	return &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		subscriptionID:    "subscriptionID",
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestDeploymentClientWithNeverRateLimiter(armClient armclient.Interface) *Client {
 | 
			
		||||
	rateLimiterReader := flowcontrol.NewFakeNeverRateLimiter()
 | 
			
		||||
	rateLimiterWriter := flowcontrol.NewFakeNeverRateLimiter()
 | 
			
		||||
	return &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		subscriptionID:    "subscriptionID",
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestDeploymentClientWithRetryAfterReader(armClient armclient.Interface) *Client {
 | 
			
		||||
	rateLimiterReader := flowcontrol.NewFakeAlwaysRateLimiter()
 | 
			
		||||
	rateLimiterWriter := flowcontrol.NewFakeAlwaysRateLimiter()
 | 
			
		||||
	return &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		subscriptionID:    "subscriptionID",
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
		RetryAfterReader:  getFutureTime(),
 | 
			
		||||
		RetryAfterWriter:  getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestDeploymentExtended(name string) resources.DeploymentExtended {
 | 
			
		||||
	return resources.DeploymentExtended{
 | 
			
		||||
		ID:   pointer.String(fmt.Sprintf("/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments/%s", name)),
 | 
			
		||||
		Name: pointer.String(name),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestNew(t *testing.T) {
 | 
			
		||||
	config := &azclients.ClientConfig{
 | 
			
		||||
		SubscriptionID:          "sub",
 | 
			
		||||
		ResourceManagerEndpoint: "endpoint",
 | 
			
		||||
		Location:                "eastus",
 | 
			
		||||
		RateLimitConfig: &azclients.RateLimitConfig{
 | 
			
		||||
			CloudProviderRateLimit:            true,
 | 
			
		||||
			CloudProviderRateLimitQPS:         0.5,
 | 
			
		||||
			CloudProviderRateLimitBucket:      1,
 | 
			
		||||
			CloudProviderRateLimitQPSWrite:    0.5,
 | 
			
		||||
			CloudProviderRateLimitBucketWrite: 1,
 | 
			
		||||
		},
 | 
			
		||||
		Backoff: &retry.Backoff{Steps: 1},
 | 
			
		||||
	}
 | 
			
		||||
	dpClient := New(config)
 | 
			
		||||
	assert.Equal(t, "sub", dpClient.subscriptionID)
 | 
			
		||||
	assert.NotEmpty(t, dpClient.rateLimiterReader)
 | 
			
		||||
	assert.NotEmpty(t, dpClient.rateLimiterWriter)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGet(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments/dep"
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusOK,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	expected := resources.DeploymentExtended{}
 | 
			
		||||
	expected.Response = autorest.Response{Response: response}
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	result, rerr := dpClient.Get(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.Equal(t, expected, result)
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpGetErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "read", "GetDeployment"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	expected := resources.DeploymentExtended{}
 | 
			
		||||
	result, rerr := dpClient.Get(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.Equal(t, expected, result)
 | 
			
		||||
	assert.Equal(t, dpGetErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetRetryAfterReader(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpGetErr := &retry.Error{
 | 
			
		||||
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "GetDeployment", "client throttled"),
 | 
			
		||||
		Retriable:  true,
 | 
			
		||||
		RetryAfter: getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClientWithRetryAfterReader(armClient)
 | 
			
		||||
	expected := resources.DeploymentExtended{}
 | 
			
		||||
	result, rerr := dpClient.Get(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.Equal(t, expected, result)
 | 
			
		||||
	assert.Equal(t, dpGetErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetThrottle(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments/dep"
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
	throttleErr := &retry.Error{
 | 
			
		||||
		HTTPStatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		RawError:       fmt.Errorf("error"),
 | 
			
		||||
		Retriable:      true,
 | 
			
		||||
		RetryAfter:     time.Unix(100, 0),
 | 
			
		||||
	}
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, throttleErr).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	result, rerr := dpClient.Get(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.Empty(t, result)
 | 
			
		||||
	assert.Equal(t, throttleErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetNotFound(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments/dep"
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusNotFound,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	expected := resources.DeploymentExtended{Response: autorest.Response{}}
 | 
			
		||||
	result, rerr := dpClient.Get(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.Equal(t, expected, result)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, http.StatusNotFound, rerr.HTTPStatusCode)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestGetInternalError(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments/dep"
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusInternalServerError,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	expected := resources.DeploymentExtended{Response: autorest.Response{}}
 | 
			
		||||
	result, rerr := dpClient.Get(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.Equal(t, expected, result)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, http.StatusInternalServerError, rerr.HTTPStatusCode)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestList(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments"
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	dpList := []resources.DeploymentExtended{getTestDeploymentExtended("dep"), getTestDeploymentExtended("dep1"), getTestDeploymentExtended("dep2")}
 | 
			
		||||
	responseBody, err := json.Marshal(resources.DeploymentListResult{Value: &dpList})
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(
 | 
			
		||||
		&http.Response{
 | 
			
		||||
			StatusCode: http.StatusOK,
 | 
			
		||||
			Body:       ioutil.NopCloser(bytes.NewReader(responseBody)),
 | 
			
		||||
		}, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	result, rerr := dpClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
	assert.Equal(t, 3, len(result))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListNextResultsMultiPages(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	tests := []struct {
 | 
			
		||||
		prepareErr error
 | 
			
		||||
		sendErr    *retry.Error
 | 
			
		||||
		statusCode int
 | 
			
		||||
	}{
 | 
			
		||||
		{
 | 
			
		||||
			prepareErr: nil,
 | 
			
		||||
			sendErr:    nil,
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			prepareErr: fmt.Errorf("error"),
 | 
			
		||||
		},
 | 
			
		||||
		{
 | 
			
		||||
			sendErr: &retry.Error{RawError: fmt.Errorf("error")},
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	lastResult := resources.DeploymentListResult{
 | 
			
		||||
		NextLink: pointer.String("next"),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	for _, test := range tests {
 | 
			
		||||
		armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
		req := &http.Request{
 | 
			
		||||
			Method: "GET",
 | 
			
		||||
		}
 | 
			
		||||
		armClient.EXPECT().PrepareGetRequest(gomock.Any(), gomock.Any()).Return(req, test.prepareErr)
 | 
			
		||||
		if test.prepareErr == nil {
 | 
			
		||||
			armClient.EXPECT().Send(gomock.Any(), req).Return(&http.Response{
 | 
			
		||||
				StatusCode: http.StatusOK,
 | 
			
		||||
				Body:       ioutil.NopCloser(bytes.NewReader([]byte(`{"foo":"bar"}`))),
 | 
			
		||||
			}, test.sendErr)
 | 
			
		||||
			armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any())
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
		result, err := dpClient.listNextResults(context.TODO(), lastResult)
 | 
			
		||||
		if test.prepareErr != nil || test.sendErr != nil {
 | 
			
		||||
			assert.Error(t, err)
 | 
			
		||||
		} else {
 | 
			
		||||
			assert.NoError(t, err)
 | 
			
		||||
		}
 | 
			
		||||
		if test.prepareErr != nil {
 | 
			
		||||
			assert.Empty(t, result)
 | 
			
		||||
		} else {
 | 
			
		||||
			assert.NotEmpty(t, result)
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListNextResultsMultiPagesWithListResponderError(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	test := struct {
 | 
			
		||||
		prepareErr error
 | 
			
		||||
		sendErr    *retry.Error
 | 
			
		||||
	}{
 | 
			
		||||
		prepareErr: nil,
 | 
			
		||||
		sendErr:    nil,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	lastResult := resources.DeploymentListResult{
 | 
			
		||||
		NextLink: pointer.String("next"),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	req := &http.Request{
 | 
			
		||||
		Method: "GET",
 | 
			
		||||
	}
 | 
			
		||||
	armClient.EXPECT().PrepareGetRequest(gomock.Any(), gomock.Any()).Return(req, test.prepareErr)
 | 
			
		||||
	if test.prepareErr == nil {
 | 
			
		||||
		armClient.EXPECT().Send(gomock.Any(), req).Return(&http.Response{
 | 
			
		||||
			StatusCode: http.StatusNotFound,
 | 
			
		||||
			Body:       ioutil.NopCloser(bytes.NewReader([]byte(`{"foo":"bar"}`))),
 | 
			
		||||
		}, test.sendErr)
 | 
			
		||||
		armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any())
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusNotFound,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewBuffer([]byte(`{"foo":"bar"}`))),
 | 
			
		||||
	}
 | 
			
		||||
	expected := resources.DeploymentListResult{}
 | 
			
		||||
	expected.Response = autorest.Response{Response: response}
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	result, err := dpClient.listNextResults(context.TODO(), lastResult)
 | 
			
		||||
	assert.Error(t, err)
 | 
			
		||||
	assert.Equal(t, expected, result)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListWithListResponderError(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments"
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	dpList := []resources.DeploymentExtended{getTestDeploymentExtended("dep"), getTestDeploymentExtended("dep1"), getTestDeploymentExtended("dep2")}
 | 
			
		||||
	responseBody, err := json.Marshal(resources.DeploymentListResult{Value: &dpList})
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(
 | 
			
		||||
		&http.Response{
 | 
			
		||||
			StatusCode: http.StatusNotFound,
 | 
			
		||||
			Body:       ioutil.NopCloser(bytes.NewReader(responseBody)),
 | 
			
		||||
		}, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	result, rerr := dpClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, 0, len(result))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListWithNextPage(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments"
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	dpList := []resources.DeploymentExtended{getTestDeploymentExtended("dep"), getTestDeploymentExtended("dep1"), getTestDeploymentExtended("dep2")}
 | 
			
		||||
	// DeploymentListResult.MarshalJson() doesn't include "nextLink" in its result, hence partialResponse is composed manually below.
 | 
			
		||||
	partialResponse, err := json.Marshal(map[string]interface{}{"value": dpList, "nextLink": "nextLink"})
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	pagedResponse, err := json.Marshal(resources.DeploymentListResult{Value: &dpList})
 | 
			
		||||
	assert.NoError(t, err)
 | 
			
		||||
	armClient.EXPECT().PrepareGetRequest(gomock.Any(), gomock.Any()).Return(&http.Request{}, nil)
 | 
			
		||||
	armClient.EXPECT().Send(gomock.Any(), gomock.Any()).Return(
 | 
			
		||||
		&http.Response{
 | 
			
		||||
			StatusCode: http.StatusOK,
 | 
			
		||||
			Body:       ioutil.NopCloser(bytes.NewReader(pagedResponse)),
 | 
			
		||||
		}, nil)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(
 | 
			
		||||
		&http.Response{
 | 
			
		||||
			StatusCode: http.StatusOK,
 | 
			
		||||
			Body:       ioutil.NopCloser(bytes.NewReader(partialResponse)),
 | 
			
		||||
		}, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(2)
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	result, rerr := dpClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
	assert.Equal(t, 6, len(result))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpListErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "read", "ListDeployment"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	dpClient := getTestDeploymentClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	result, rerr := dpClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.Equal(t, 0, len(result))
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, dpListErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListRetryAfterReader(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpListErr := &retry.Error{
 | 
			
		||||
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "ListDeployment", "client throttled"),
 | 
			
		||||
		Retriable:  true,
 | 
			
		||||
		RetryAfter: getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	dpClient := getTestDeploymentClientWithRetryAfterReader(armClient)
 | 
			
		||||
	result, rerr := dpClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.Equal(t, 0, len(result))
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, dpListErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestListThrottle(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Resources/deployments"
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
	throttleErr := &retry.Error{
 | 
			
		||||
		HTTPStatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		RawError:       fmt.Errorf("error"),
 | 
			
		||||
		Retriable:      true,
 | 
			
		||||
		RetryAfter:     time.Unix(100, 0),
 | 
			
		||||
	}
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, throttleErr).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	result, rerr := dpClient.List(context.TODO(), "rg")
 | 
			
		||||
	assert.Empty(t, result)
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, throttleErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdate(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dp := resources.Deployment{}
 | 
			
		||||
	dpExtended := getTestDeploymentExtended("dep")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusOK,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
 | 
			
		||||
	}
 | 
			
		||||
	armClient.EXPECT().PutResourceWithDecorators(gomock.Any(), pointer.StringDeref(dpExtended.ID, ""), dp, gomock.Any()).Return(response, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	rerr := dpClient.CreateOrUpdate(context.TODO(), "rg", "dep", dp, "*")
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateWithCreateOrUpdateResponderError(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dp := resources.Deployment{}
 | 
			
		||||
	dpExtended := getTestDeploymentExtended("dep")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusNotFound,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
 | 
			
		||||
	}
 | 
			
		||||
	armClient.EXPECT().PutResourceWithDecorators(gomock.Any(), pointer.StringDeref(dpExtended.ID, ""), dp, gomock.Any()).Return(response, nil).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	rerr := dpClient.CreateOrUpdate(context.TODO(), "rg", "dep", dp, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpCreateOrUpdateErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "CreateOrUpdateDeployment"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	dp := resources.Deployment{}
 | 
			
		||||
	rerr := dpClient.CreateOrUpdate(context.TODO(), "rg", "dep", dp, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, dpCreateOrUpdateErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateRetryAfterReader(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpCreateOrUpdateErr := &retry.Error{
 | 
			
		||||
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "CreateOrUpdateDeployment", "client throttled"),
 | 
			
		||||
		Retriable:  true,
 | 
			
		||||
		RetryAfter: getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	dp := resources.Deployment{}
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	mcClient := getTestDeploymentClientWithRetryAfterReader(armClient)
 | 
			
		||||
	rerr := mcClient.CreateOrUpdate(context.TODO(), "rg", "dep", dp, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, dpCreateOrUpdateErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestCreateOrUpdateThrottle(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	response := &http.Response{
 | 
			
		||||
		StatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
 | 
			
		||||
	}
 | 
			
		||||
	throttleErr := &retry.Error{
 | 
			
		||||
		HTTPStatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		RawError:       fmt.Errorf("error"),
 | 
			
		||||
		Retriable:      true,
 | 
			
		||||
		RetryAfter:     time.Unix(100, 0),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	dp := resources.Deployment{}
 | 
			
		||||
	dpExtended := getTestDeploymentExtended("dep")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().PutResourceWithDecorators(gomock.Any(), pointer.StringDeref(dpExtended.ID, ""), dp, gomock.Any()).Return(response, throttleErr).Times(1)
 | 
			
		||||
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	rerr := dpClient.CreateOrUpdate(context.TODO(), "rg", "dep", dp, "")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, throttleErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDelete(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dp := getTestDeploymentExtended("dep")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().DeleteResource(gomock.Any(), pointer.StringDeref(dp.ID, ""), "").Return(nil).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	rerr := dpClient.Delete(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.Nil(t, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteNeverRateLimiter(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpDeleteErr := &retry.Error{
 | 
			
		||||
		RawError:  fmt.Errorf("azure cloud provider rate limited(%s) for operation %q", "write", "DeleteDeployment"),
 | 
			
		||||
		Retriable: true,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClientWithNeverRateLimiter(armClient)
 | 
			
		||||
	rerr := dpClient.Delete(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, dpDeleteErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteRetryAfterReader(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	dpDeleteErr := &retry.Error{
 | 
			
		||||
		RawError:   fmt.Errorf("azure cloud provider throttled for operation %s with reason %q", "DeleteDeployment", "client throttled"),
 | 
			
		||||
		Retriable:  true,
 | 
			
		||||
		RetryAfter: getFutureTime(),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClientWithRetryAfterReader(armClient)
 | 
			
		||||
	rerr := dpClient.Delete(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, dpDeleteErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func TestDeleteThrottle(t *testing.T) {
 | 
			
		||||
	ctrl := gomock.NewController(t)
 | 
			
		||||
	defer ctrl.Finish()
 | 
			
		||||
 | 
			
		||||
	throttleErr := &retry.Error{
 | 
			
		||||
		HTTPStatusCode: http.StatusTooManyRequests,
 | 
			
		||||
		RawError:       fmt.Errorf("error"),
 | 
			
		||||
		Retriable:      true,
 | 
			
		||||
		RetryAfter:     time.Unix(100, 0),
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	dp := getTestDeploymentExtended("dep")
 | 
			
		||||
	armClient := mockarmclient.NewMockInterface(ctrl)
 | 
			
		||||
	armClient.EXPECT().DeleteResource(gomock.Any(), pointer.StringDeref(dp.ID, ""), "").Return(throttleErr).Times(1)
 | 
			
		||||
 | 
			
		||||
	dpClient := getTestDeploymentClient(armClient)
 | 
			
		||||
	rerr := dpClient.Delete(context.TODO(), "rg", "dep")
 | 
			
		||||
	assert.NotNil(t, rerr)
 | 
			
		||||
	assert.Equal(t, throttleErr, rerr)
 | 
			
		||||
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package deploymentclient implements the client for azure deployments.
 | 
			
		||||
package deploymentclient // import "k8s.io/legacy-cloud-providers/azure/clients/deploymentclient"
 | 
			
		||||
@@ -1,42 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
//go:generate mockgen -copyright_file=$BUILD_TAG_FILE -source=interface.go  -destination=mockdeploymentclient/interface.go -package=mockdeploymentclient Interface
 | 
			
		||||
package deploymentclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
const (
	// APIVersion is the API version for resources.
	// Pinned to the 2017-05-10 Azure Resource Manager deployments API,
	// matching the vendored azure-sdk-for-go resources package.
	APIVersion = "2017-05-10"
)
 | 
			
		||||
 | 
			
		||||
// Interface is the client interface for Deployments.
 | 
			
		||||
type Interface interface {
 | 
			
		||||
	Get(ctx context.Context, resourceGroupName string, deploymentName string) (resources.DeploymentExtended, *retry.Error)
 | 
			
		||||
	List(ctx context.Context, resourceGroupName string) ([]resources.DeploymentExtended, *retry.Error)
 | 
			
		||||
	ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result resources.DeploymentExportResult, rerr *retry.Error)
 | 
			
		||||
	CreateOrUpdate(ctx context.Context, resourceGroupName string, managedClusterName string, parameters resources.Deployment, etag string) *retry.Error
 | 
			
		||||
	Delete(ctx context.Context, resourceGroupName string, deploymentName string) *retry.Error
 | 
			
		||||
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package mockdeploymentclient implements the mock client for azure deployments.
 | 
			
		||||
package mockdeploymentclient // import "k8s.io/legacy-cloud-providers/azure/clients/deploymentclient/mockdeploymentclient"
 | 
			
		||||
@@ -1,129 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Code generated by MockGen. DO NOT EDIT.
 | 
			
		||||
// Source: interface.go
 | 
			
		||||
 | 
			
		||||
// Package mockdeploymentclient is a generated GoMock package.
 | 
			
		||||
package mockdeploymentclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	context "context"
 | 
			
		||||
	reflect "reflect"
 | 
			
		||||
 | 
			
		||||
	resources "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
 | 
			
		||||
	gomock "github.com/golang/mock/gomock"
 | 
			
		||||
	retry "k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// MockInterface is a mock of Interface interface.
// Generated by MockGen; prefer regenerating over hand-editing.
type MockInterface struct {
	// ctrl drives expectation matching and test-failure reporting.
	ctrl     *gomock.Controller
	// recorder accumulates expected calls declared through EXPECT().
	recorder *MockInterfaceMockRecorder
}
 | 
			
		||||
 | 
			
		||||
// MockInterfaceMockRecorder is the mock recorder for MockInterface.
// It is the object returned by EXPECT() on which expected calls are declared.
type MockInterfaceMockRecorder struct {
	mock *MockInterface
}
 | 
			
		||||
 | 
			
		||||
// NewMockInterface creates a new mock instance.
// Unexpected calls on the returned mock are reported to ctrl.
func NewMockInterface(ctrl *gomock.Controller) *MockInterface {
	mock := &MockInterface{ctrl: ctrl}
	mock.recorder = &MockInterfaceMockRecorder{mock}
	return mock
}
 | 
			
		||||
 | 
			
		||||
// EXPECT returns an object that allows the caller to indicate expected use.
// Calls declared on the recorder are verified by ctrl.Finish().
func (m *MockInterface) EXPECT() *MockInterfaceMockRecorder {
	return m.recorder
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdate mocks base method.
//
// NOTE(review): managedClusterName actually carries a deployment name; the
// name mirrors the (misnamed) parameter in interface.go.
func (m *MockInterface) CreateOrUpdate(ctx context.Context, resourceGroupName, managedClusterName string, parameters resources.Deployment, etag string) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, managedClusterName, parameters, etag)
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdate indicates an expected call of CreateOrUpdate.
// Arguments may be concrete values or gomock matchers.
func (mr *MockInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, managedClusterName, parameters, etag interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, managedClusterName, parameters, etag)
}
 | 
			
		||||
 | 
			
		||||
// Delete mocks base method.
func (m *MockInterface) Delete(ctx context.Context, resourceGroupName, deploymentName string) *retry.Error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Delete", ctx, resourceGroupName, deploymentName)
	// Non-asserting type assertion: a nil return from the expectation yields nil.
	ret0, _ := ret[0].(*retry.Error)
	return ret0
}
 | 
			
		||||
 | 
			
		||||
// Delete indicates an expected call of Delete.
// Arguments may be concrete values or gomock matchers.
func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, deploymentName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, deploymentName)
}
 | 
			
		||||
 | 
			
		||||
// ExportTemplate mocks base method.
func (m *MockInterface) ExportTemplate(ctx context.Context, resourceGroupName, deploymentName string) (resources.DeploymentExportResult, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "ExportTemplate", ctx, resourceGroupName, deploymentName)
	// Non-asserting type assertions: unset returns fall back to zero values.
	ret0, _ := ret[0].(resources.DeploymentExportResult)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// ExportTemplate indicates an expected call of ExportTemplate.
// Arguments may be concrete values or gomock matchers.
func (mr *MockInterfaceMockRecorder) ExportTemplate(ctx, resourceGroupName, deploymentName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportTemplate", reflect.TypeOf((*MockInterface)(nil).ExportTemplate), ctx, resourceGroupName, deploymentName)
}
 | 
			
		||||
 | 
			
		||||
// Get mocks base method.
func (m *MockInterface) Get(ctx context.Context, resourceGroupName, deploymentName string) (resources.DeploymentExtended, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, deploymentName)
	// Non-asserting type assertions: unset returns fall back to zero values.
	ret0, _ := ret[0].(resources.DeploymentExtended)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// Get indicates an expected call of Get.
// Arguments may be concrete values or gomock matchers.
func (mr *MockInterfaceMockRecorder) Get(ctx, resourceGroupName, deploymentName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockInterface)(nil).Get), ctx, resourceGroupName, deploymentName)
}
 | 
			
		||||
 | 
			
		||||
// List mocks base method.
func (m *MockInterface) List(ctx context.Context, resourceGroupName string) ([]resources.DeploymentExtended, *retry.Error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "List", ctx, resourceGroupName)
	// Non-asserting type assertions: unset returns fall back to zero values.
	ret0, _ := ret[0].([]resources.DeploymentExtended)
	ret1, _ := ret[1].(*retry.Error)
	return ret0, ret1
}
 | 
			
		||||
 | 
			
		||||
// List indicates an expected call of List.
// Arguments may be concrete values or gomock matchers.
func (mr *MockInterfaceMockRecorder) List(ctx, resourceGroupName interface{}) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockInterface)(nil).List), ctx, resourceGroupName)
}
 | 
			
		||||
@@ -1,453 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package diskclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"strings"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest/azure"
 | 
			
		||||
 | 
			
		||||
	"k8s.io/client-go/util/flowcontrol"
 | 
			
		||||
	"k8s.io/klog/v2"
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/metrics"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// Compile-time assertion that *Client implements Interface.
var _ Interface = &Client{}

// Client implements Disk client Interface.
type Client struct {
	armClient      armclient.Interface
	// subscriptionID scopes every resource ID this client builds.
	subscriptionID string

	// Rate limiting configures.
	rateLimiterReader flowcontrol.RateLimiter
	rateLimiterWriter flowcontrol.RateLimiter

	// ARM throttling configures.
	// After a throttled response these record the time before which no
	// further read/write requests should be issued.
	RetryAfterReader time.Time
	RetryAfterWriter time.Time
}
 | 
			
		||||
 | 
			
		||||
// New creates a new Disk client with ratelimiting.
 | 
			
		||||
func New(config *azclients.ClientConfig) *Client {
 | 
			
		||||
	baseURI := config.ResourceManagerEndpoint
 | 
			
		||||
	authorizer := config.Authorizer
 | 
			
		||||
	apiVersion := APIVersion
 | 
			
		||||
	if strings.EqualFold(config.CloudName, AzureStackCloudName) {
 | 
			
		||||
		apiVersion = AzureStackCloudAPIVersion
 | 
			
		||||
	}
 | 
			
		||||
	armClient := armclient.New(authorizer, baseURI, config.UserAgent, apiVersion, config.Location, config.Backoff)
 | 
			
		||||
	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
 | 
			
		||||
 | 
			
		||||
	klog.V(2).Infof("Azure DisksClient (read ops) using rate limit config: QPS=%g, bucket=%d",
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitQPS,
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitBucket)
 | 
			
		||||
	klog.V(2).Infof("Azure DisksClient (write ops) using rate limit config: QPS=%g, bucket=%d",
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
 | 
			
		||||
		config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
 | 
			
		||||
 | 
			
		||||
	client := &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
		subscriptionID:    config.SubscriptionID,
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return client
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Get gets a Disk.
 | 
			
		||||
func (c *Client) Get(ctx context.Context, resourceGroupName string, diskName string) (compute.Disk, *retry.Error) {
 | 
			
		||||
	mc := metrics.NewMetricContext("disks", "get", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterReader.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return compute.Disk{}, retry.GetRateLimitError(false, "GetDisk")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterReader.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("GetDisk", "client throttled", c.RetryAfterReader)
 | 
			
		||||
		return compute.Disk{}, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result, rerr := c.getDisk(ctx, resourceGroupName, diskName)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterReader = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// getDisk gets a Disk.
 | 
			
		||||
func (c *Client) getDisk(ctx context.Context, resourceGroupName string, diskName string) (compute.Disk, *retry.Error) {
 | 
			
		||||
	resourceID := armclient.GetResourceID(
 | 
			
		||||
		c.subscriptionID,
 | 
			
		||||
		resourceGroupName,
 | 
			
		||||
		"Microsoft.Compute/disks",
 | 
			
		||||
		diskName,
 | 
			
		||||
	)
 | 
			
		||||
	result := compute.Disk{}
 | 
			
		||||
 | 
			
		||||
	response, rerr := c.armClient.GetResource(ctx, resourceID, "")
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, response)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.get.request", resourceID, rerr.Error())
 | 
			
		||||
		return result, rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	err := autorest.Respond(
 | 
			
		||||
		response,
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.get.respond", resourceID, err)
 | 
			
		||||
		return result, retry.GetError(response, err)
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	result.Response = autorest.Response{Response: response}
 | 
			
		||||
	return result, nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// CreateOrUpdate creates or updates a Disk.
 | 
			
		||||
func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error {
 | 
			
		||||
	mc := metrics.NewMetricContext("disks", "create_or_update", resourceGroupName, c.subscriptionID, "")
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is rate limited.
 | 
			
		||||
	if !c.rateLimiterWriter.TryAccept() {
 | 
			
		||||
		mc.RateLimitedCount()
 | 
			
		||||
		return retry.GetRateLimitError(true, "DiskCreateOrUpdate")
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	// Report errors if the client is throttled.
 | 
			
		||||
	if c.RetryAfterWriter.After(time.Now()) {
 | 
			
		||||
		mc.ThrottledCount()
 | 
			
		||||
		rerr := retry.GetThrottlingError("DiskCreateOrUpdate", "client throttled", c.RetryAfterWriter)
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	rerr := c.createOrUpdateDisk(ctx, resourceGroupName, diskName, diskParameter)
 | 
			
		||||
	mc.Observe(rerr.Error())
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		if rerr.IsThrottled() {
 | 
			
		||||
			// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
 | 
			
		||||
			c.RetryAfterWriter = rerr.RetryAfter
 | 
			
		||||
		}
 | 
			
		||||
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// createOrUpdateDisk creates or updates a Disk.
 | 
			
		||||
func (c *Client) createOrUpdateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.Disk) *retry.Error {
 | 
			
		||||
	resourceID := armclient.GetResourceID(
 | 
			
		||||
		c.subscriptionID,
 | 
			
		||||
		resourceGroupName,
 | 
			
		||||
		"Microsoft.Compute/disks",
 | 
			
		||||
		diskName,
 | 
			
		||||
	)
 | 
			
		||||
 | 
			
		||||
	response, rerr := c.armClient.PutResource(ctx, resourceID, diskParameter)
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, response)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.request", resourceID, rerr.Error())
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if response != nil && response.StatusCode != http.StatusNoContent {
 | 
			
		||||
		_, rerr = c.createOrUpdateResponder(response)
 | 
			
		||||
		if rerr != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.respond", resourceID, rerr.Error())
 | 
			
		||||
			return rerr
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// createOrUpdateResponder unwraps a PUT response into a compute.Disk.
// Status 200 (OK) and 201 (Created) are accepted; any other status is
// converted to a *retry.Error via retry.GetError. The autorest.Response is
// attached to the result even when decoding fails.
//
// NOTE(review): &result here is a **compute.Disk; JSON decoding tolerates the
// extra indirection, but a literal `null` body would nil the pointer before
// result.Response is assigned — confirm ARM never returns that shape.
func (c *Client) createOrUpdateResponder(resp *http.Response) (*compute.Disk, *retry.Error) {
	result := &compute.Disk{}
	err := autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&result))
	result.Response = autorest.Response{Response: resp}
	return result, retry.GetError(resp, err)
}
 | 
			
		||||
 | 
			
		||||
// Update updates a Disk (via PATCH), honoring the client-side write
// rate limiter and any server-imposed throttling (Retry-After) window.
func (c *Client) Update(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
	mc := metrics.NewMetricContext("disks", "update", resourceGroupName, c.subscriptionID, "")

	// Report errors if the client is rate limited.
	if !c.rateLimiterWriter.TryAccept() {
		mc.RateLimitedCount()
		return retry.GetRateLimitError(true, "DiskUpdate")
	}

	// Report errors if the client is throttled.
	if c.RetryAfterWriter.After(time.Now()) {
		mc.ThrottledCount()
		rerr := retry.GetThrottlingError("DiskUpdate", "client throttled", c.RetryAfterWriter)
		return rerr
	}

	rerr := c.updateDisk(ctx, resourceGroupName, diskName, diskParameter)
	// NOTE(review): rerr may be nil here — presumably (*retry.Error).Error()
	// tolerates a nil receiver; confirm against the retry package.
	mc.Observe(rerr.Error())
	if rerr != nil {
		if rerr.IsThrottled() {
			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
			c.RetryAfterWriter = rerr.RetryAfter
		}

		return rerr
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// updateDisk updates a Disk.
 | 
			
		||||
func (c *Client) updateDisk(ctx context.Context, resourceGroupName string, diskName string, diskParameter compute.DiskUpdate) *retry.Error {
 | 
			
		||||
	resourceID := armclient.GetResourceID(
 | 
			
		||||
		c.subscriptionID,
 | 
			
		||||
		resourceGroupName,
 | 
			
		||||
		"Microsoft.Compute/disks",
 | 
			
		||||
		diskName,
 | 
			
		||||
	)
 | 
			
		||||
 | 
			
		||||
	response, rerr := c.armClient.PatchResource(ctx, resourceID, diskParameter)
 | 
			
		||||
	defer c.armClient.CloseResponse(ctx, response)
 | 
			
		||||
	if rerr != nil {
 | 
			
		||||
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.request", resourceID, rerr.Error())
 | 
			
		||||
		return rerr
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if response != nil && response.StatusCode != http.StatusNoContent {
 | 
			
		||||
		_, rerr = c.updateResponder(response)
 | 
			
		||||
		if rerr != nil {
 | 
			
		||||
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.put.respond", resourceID, rerr.Error())
 | 
			
		||||
			return rerr
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) updateResponder(resp *http.Response) (*compute.Disk, *retry.Error) {
 | 
			
		||||
	result := &compute.Disk{}
 | 
			
		||||
	err := autorest.Respond(
 | 
			
		||||
		resp,
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result))
 | 
			
		||||
	result.Response = autorest.Response{Response: resp}
 | 
			
		||||
	return result, retry.GetError(resp, err)
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Delete deletes a Disk by name, honoring the client-side write rate
// limiter and any server-imposed throttling (Retry-After) window.
func (c *Client) Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error {
	mc := metrics.NewMetricContext("disks", "delete", resourceGroupName, c.subscriptionID, "")

	// Report errors if the client is rate limited.
	if !c.rateLimiterWriter.TryAccept() {
		mc.RateLimitedCount()
		return retry.GetRateLimitError(true, "DiskDelete")
	}

	// Report errors if the client is throttled.
	if c.RetryAfterWriter.After(time.Now()) {
		mc.ThrottledCount()
		rerr := retry.GetThrottlingError("DiskDelete", "client throttled", c.RetryAfterWriter)
		return rerr
	}

	rerr := c.deleteDisk(ctx, resourceGroupName, diskName)
	// NOTE(review): rerr may be nil here — presumably (*retry.Error).Error()
	// tolerates a nil receiver; confirm against the retry package.
	mc.Observe(rerr.Error())
	if rerr != nil {
		if rerr.IsThrottled() {
			// Update RetryAfterWriter so that no more requests would be sent until RetryAfter expires.
			c.RetryAfterWriter = rerr.RetryAfter
		}

		return rerr
	}

	return nil
}
 | 
			
		||||
 | 
			
		||||
// deleteDisk deletes a Disk by name. (Previous comment wrongly said
// PublicIPAddress — copy-paste residue; this client only handles disks.)
func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskName string) *retry.Error {
	resourceID := armclient.GetResourceID(
		c.subscriptionID,
		resourceGroupName,
		"Microsoft.Compute/disks",
		diskName,
	)

	return c.armClient.DeleteResource(ctx, resourceID, "")
}
 | 
			
		||||
 | 
			
		||||
// ListByResourceGroup lists all the disks under a resource group.
//
// The first page is fetched directly; subsequent pages are followed
// through DiskListPage (wired to listNextResults) until the server
// stops returning a nextLink. On error the disks accumulated so far
// are returned alongside the error.
func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
	resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks",
		autorest.Encode("path", c.subscriptionID),
		autorest.Encode("path", resourceGroupName))

	result := make([]compute.Disk, 0)
	page := &DiskListPage{}
	page.fn = c.listNextResults

	resp, rerr := c.armClient.GetResource(ctx, resourceID, "")
	// Deferred before the error check so the response is closed on every path.
	defer c.armClient.CloseResponse(ctx, resp)
	if rerr != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.request", resourceID, rerr.Error())
		return result, rerr
	}

	var err error
	page.dl, err = c.listResponder(resp)
	if err != nil {
		klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.respond", resourceID, err)
		return result, retry.GetError(resp, err)
	}

	for {
		result = append(result, page.Values()...)

		// Abort the loop when there's no nextLink in the response.
		if pointer.StringDeref(page.Response().NextLink, "") == "" {
			break
		}

		if err = page.NextWithContext(ctx); err != nil {
			klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.next", resourceID, err)
			return result, retry.GetError(page.Response().Response.Response, err)
		}
	}

	return result, nil
}
 | 
			
		||||
 | 
			
		||||
// listNextResults retrieves the next set of results, if any.
//
// A nil request from diskListPreparer means there is no next page; in
// that case the zero-value DiskList and a nil error are returned (the
// naked return below returns the named results as-is).
func (c *Client) listNextResults(ctx context.Context, lastResults compute.DiskList) (result compute.DiskList, err error) {
	req, err := c.diskListPreparer(ctx, lastResults)
	if err != nil {
		return result, autorest.NewErrorWithError(err, "diskclient", "listNextResults", nil, "Failure preparing next results request")
	}
	if req == nil {
		// No nextLink: enumeration is complete.
		return
	}

	resp, rerr := c.armClient.Send(ctx, req)
	// Deferred before the error check so the response is closed on every path.
	defer c.armClient.CloseResponse(ctx, resp)
	if rerr != nil {
		result.Response = autorest.Response{Response: resp}
		return result, autorest.NewErrorWithError(rerr.Error(), "diskclient", "listNextResults", resp, "Failure sending next results request")
	}

	result, err = c.listResponder(resp)
	if err != nil {
		err = autorest.NewErrorWithError(err, "diskclient", "listNextResults", resp, "Failure responding to next results request")
	}
	return
}
 | 
			
		||||
 | 
			
		||||
// listResponder handles the response to the List request. The method always
 | 
			
		||||
// closes the http.Response Body.
 | 
			
		||||
func (c *Client) listResponder(resp *http.Response) (result compute.DiskList, err error) {
 | 
			
		||||
	err = autorest.Respond(
 | 
			
		||||
		resp,
 | 
			
		||||
		azure.WithErrorUnlessStatusCode(http.StatusOK),
 | 
			
		||||
		autorest.ByUnmarshallingJSON(&result),
 | 
			
		||||
		autorest.ByClosing())
 | 
			
		||||
	result.Response = autorest.Response{Response: resp}
 | 
			
		||||
	return
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func (c *Client) diskListPreparer(ctx context.Context, lr compute.DiskList) (*http.Request, error) {
 | 
			
		||||
	if lr.NextLink == nil || len(pointer.StringDeref(lr.NextLink, "")) < 1 {
 | 
			
		||||
		return nil, nil
 | 
			
		||||
	}
 | 
			
		||||
	return autorest.Prepare((&http.Request{}).WithContext(ctx),
 | 
			
		||||
		autorest.AsJSON(),
 | 
			
		||||
		autorest.AsGet(),
 | 
			
		||||
		autorest.WithBaseURL(pointer.StringDeref(lr.NextLink, "")))
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// DiskListPage contains a page of Disk values.
type DiskListPage struct {
	// fn fetches the page following the given one; ListByResourceGroup
	// wires it to Client.listNextResults.
	fn func(context.Context, compute.DiskList) (compute.DiskList, error)
	// dl holds the current page of results.
	dl compute.DiskList
}
 | 
			
		||||
 | 
			
		||||
// NextWithContext advances to the next page of values.  If there was an error making
 | 
			
		||||
// the request the page does not advance and the error is returned.
 | 
			
		||||
func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) {
 | 
			
		||||
	next, err := page.fn(ctx, page.dl)
 | 
			
		||||
	if err != nil {
 | 
			
		||||
		return err
 | 
			
		||||
	}
 | 
			
		||||
	page.dl = next
 | 
			
		||||
	return nil
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// Next advances to the next page of values.  If there was an error making
 | 
			
		||||
// the request the page does not advance and the error is returned.
 | 
			
		||||
// Deprecated: Use NextWithContext() instead.
 | 
			
		||||
func (page *DiskListPage) Next() error {
 | 
			
		||||
	return page.NextWithContext(context.Background())
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// NotDone returns true if the page enumeration should be started or is not yet complete.
// It is simply the negation of the underlying list's IsEmpty.
func (page DiskListPage) NotDone() bool {
	return !page.dl.IsEmpty()
}
 | 
			
		||||
 | 
			
		||||
// Response returns the raw server response from the last page request.
func (page DiskListPage) Response() compute.DiskList {
	return page.dl
}
 | 
			
		||||
 | 
			
		||||
// Values returns the slice of values for the current page or nil if there are no values.
 | 
			
		||||
func (page DiskListPage) Values() []compute.Disk {
 | 
			
		||||
	if page.dl.IsEmpty() {
 | 
			
		||||
		return nil
 | 
			
		||||
	}
 | 
			
		||||
	return *page.dl.Value
 | 
			
		||||
}
 | 
			
		||||
@@ -1,247 +0,0 @@
 | 
			
		||||
//go:build !providerless
 | 
			
		||||
// +build !providerless
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
package diskclient
 | 
			
		||||
 | 
			
		||||
import (
 | 
			
		||||
	"bytes"
 | 
			
		||||
	"context"
 | 
			
		||||
	"fmt"
 | 
			
		||||
	"io/ioutil"
 | 
			
		||||
	"net/http"
 | 
			
		||||
	"testing"
 | 
			
		||||
	"time"
 | 
			
		||||
 | 
			
		||||
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
 | 
			
		||||
	"github.com/Azure/go-autorest/autorest"
 | 
			
		||||
	"github.com/golang/mock/gomock"
 | 
			
		||||
	"github.com/stretchr/testify/assert"
 | 
			
		||||
 | 
			
		||||
	azclients "k8s.io/legacy-cloud-providers/azure/clients"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/clients/armclient/mockarmclient"
 | 
			
		||||
	"k8s.io/legacy-cloud-providers/azure/retry"
 | 
			
		||||
	"k8s.io/utils/pointer"
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
// TestNew verifies that New wires the subscription ID and creates both
// reader and writer rate limiters from the supplied ClientConfig.
func TestNew(t *testing.T) {
	config := &azclients.ClientConfig{
		SubscriptionID:          "sub",
		ResourceManagerEndpoint: "endpoint",
		Location:                "eastus",
		RateLimitConfig: &azclients.RateLimitConfig{
			CloudProviderRateLimit:            true,
			CloudProviderRateLimitQPS:         0.5,
			CloudProviderRateLimitBucket:      1,
			CloudProviderRateLimitQPSWrite:    0.5,
			CloudProviderRateLimitBucketWrite: 1,
		},
		Backoff: &retry.Backoff{Steps: 1},
	}

	diskClient := New(config)
	assert.Equal(t, "sub", diskClient.subscriptionID)
	assert.NotEmpty(t, diskClient.rateLimiterReader)
	assert.NotEmpty(t, diskClient.rateLimiterWriter)
}
 | 
			
		||||
 | 
			
		||||
// TestGetNotFound verifies that Get surfaces an ARM 404 as a *retry.Error
// carrying StatusNotFound while returning an empty Disk.
func TestGetNotFound(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1"
	response := &http.Response{
		StatusCode: http.StatusNotFound,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	diskClient := getTestDiskClient(armClient)
	expected := compute.Disk{Response: autorest.Response{}}
	result, rerr := diskClient.Get(context.TODO(), "rg", "disk1")
	assert.Equal(t, expected, result)
	assert.NotNil(t, rerr)
	assert.Equal(t, http.StatusNotFound, rerr.HTTPStatusCode)
}
 | 
			
		||||
 | 
			
		||||
// TestGetInternalError verifies that Get surfaces an ARM 500 as a
// *retry.Error carrying StatusInternalServerError with an empty Disk.
func TestGetInternalError(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1"
	response := &http.Response{
		StatusCode: http.StatusInternalServerError,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	diskClient := getTestDiskClient(armClient)
	expected := compute.Disk{Response: autorest.Response{}}
	result, rerr := diskClient.Get(context.TODO(), "rg", "disk1")
	assert.Equal(t, expected, result)
	assert.NotNil(t, rerr)
	assert.Equal(t, http.StatusInternalServerError, rerr.HTTPStatusCode)
}
 | 
			
		||||
 | 
			
		||||
// TestGetThrottle verifies that when the ARM client itself returns a
// throttling error (429), Get passes that error through unchanged.
func TestGetThrottle(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1"
	response := &http.Response{
		StatusCode: http.StatusTooManyRequests,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	throttleErr := &retry.Error{
		HTTPStatusCode: http.StatusTooManyRequests,
		RawError:       fmt.Errorf("error"),
		Retriable:      true,
		RetryAfter:     time.Unix(100, 0),
	}
	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().GetResource(gomock.Any(), resourceID, "").Return(response, throttleErr).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	diskClient := getTestDiskClient(armClient)
	result, rerr := diskClient.Get(context.TODO(), "rg", "disk1")
	assert.Empty(t, result)
	assert.Equal(t, throttleErr, rerr)
}
 | 
			
		||||
 | 
			
		||||
// TestCreateOrUpdate verifies both the success path (PUT returns 200 →
// nil error) and the throttling path (PUT returns 429 + retry.Error →
// the error is passed through) of CreateOrUpdate.
func TestCreateOrUpdate(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	disk := getTestDisk("disk1")
	armClient := mockarmclient.NewMockInterface(ctrl)
	response := &http.Response{
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
	}
	armClient.EXPECT().PutResource(gomock.Any(), pointer.StringDeref(disk.ID, ""), disk).Return(response, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	diskClient := getTestDiskClient(armClient)
	rerr := diskClient.CreateOrUpdate(context.TODO(), "rg", "disk1", disk)
	assert.Nil(t, rerr)

	// Second call: the ARM client reports throttling.
	response = &http.Response{
		StatusCode: http.StatusTooManyRequests,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	throttleErr := &retry.Error{
		HTTPStatusCode: http.StatusTooManyRequests,
		RawError:       fmt.Errorf("error"),
		Retriable:      true,
		RetryAfter:     time.Unix(100, 0),
	}

	armClient.EXPECT().PutResource(gomock.Any(), pointer.StringDeref(disk.ID, ""), disk).Return(response, throttleErr).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
	rerr = diskClient.CreateOrUpdate(context.TODO(), "rg", "disk1", disk)
	assert.Equal(t, throttleErr, rerr)
}
 | 
			
		||||
 | 
			
		||||
// TestUpdate verifies both the success path (PATCH returns 200 → nil
// error) and the throttling path (PATCH returns 429 + retry.Error →
// the error is passed through) of Update.
func TestUpdate(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	resourceID := "/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/disks/disk1"
	diskUpdate := getTestDiskUpdate()
	armClient := mockarmclient.NewMockInterface(ctrl)
	response := &http.Response{
		StatusCode: http.StatusOK,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
	}
	armClient.EXPECT().PatchResource(gomock.Any(), resourceID, diskUpdate).Return(response, nil).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)

	diskClient := getTestDiskClient(armClient)
	rerr := diskClient.Update(context.TODO(), "rg", "disk1", diskUpdate)
	assert.Nil(t, rerr)

	// Second call: the ARM client reports throttling.
	response = &http.Response{
		StatusCode: http.StatusTooManyRequests,
		Body:       ioutil.NopCloser(bytes.NewReader([]byte("{}"))),
	}
	throttleErr := &retry.Error{
		HTTPStatusCode: http.StatusTooManyRequests,
		RawError:       fmt.Errorf("error"),
		Retriable:      true,
		RetryAfter:     time.Unix(100, 0),
	}

	armClient.EXPECT().PatchResource(gomock.Any(), resourceID, diskUpdate).Return(response, throttleErr).Times(1)
	armClient.EXPECT().CloseResponse(gomock.Any(), gomock.Any()).Times(1)
	rerr = diskClient.Update(context.TODO(), "rg", "disk1", diskUpdate)
	assert.Equal(t, throttleErr, rerr)
}
 | 
			
		||||
 | 
			
		||||
func getTestDiskUpdate() compute.DiskUpdate {
 | 
			
		||||
	return compute.DiskUpdate{
 | 
			
		||||
		DiskUpdateProperties: &compute.DiskUpdateProperties{
 | 
			
		||||
			DiskSizeGB: pointer.Int32(100),
 | 
			
		||||
		},
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
// TestDelete verifies both the success path (DeleteResource returns nil)
// and the throttling path (DeleteResource returns a retry.Error → the
// error is passed through) of Delete.
func TestDelete(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	r := getTestDisk("disk1")
	armClient := mockarmclient.NewMockInterface(ctrl)
	armClient.EXPECT().DeleteResource(gomock.Any(), pointer.StringDeref(r.ID, ""), "").Return(nil).Times(1)

	diskClient := getTestDiskClient(armClient)
	rerr := diskClient.Delete(context.TODO(), "rg", "disk1")
	assert.Nil(t, rerr)

	// Second call: the ARM client reports throttling.
	throttleErr := &retry.Error{
		HTTPStatusCode: http.StatusTooManyRequests,
		RawError:       fmt.Errorf("error"),
		Retriable:      true,
		RetryAfter:     time.Unix(100, 0),
	}
	armClient.EXPECT().DeleteResource(gomock.Any(), pointer.StringDeref(r.ID, ""), "").Return(throttleErr).Times(1)
	rerr = diskClient.Delete(context.TODO(), "rg", "disk1")
	assert.Equal(t, throttleErr, rerr)
}
 | 
			
		||||
 | 
			
		||||
func getTestDisk(name string) compute.Disk {
 | 
			
		||||
	return compute.Disk{
 | 
			
		||||
		ID:       pointer.String(fmt.Sprintf("/subscriptions/subscriptionID/resourceGroups/rg/providers/Microsoft.Compute/disks/%s", name)),
 | 
			
		||||
		Name:     pointer.String(name),
 | 
			
		||||
		Location: pointer.String("eastus"),
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
func getTestDiskClient(armClient armclient.Interface) *Client {
 | 
			
		||||
	rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(&azclients.RateLimitConfig{})
 | 
			
		||||
	return &Client{
 | 
			
		||||
		armClient:         armClient,
 | 
			
		||||
		subscriptionID:    "subscriptionID",
 | 
			
		||||
		rateLimiterReader: rateLimiterReader,
 | 
			
		||||
		rateLimiterWriter: rateLimiterWriter,
 | 
			
		||||
	}
 | 
			
		||||
}
 | 
			
		||||
@@ -1,18 +0,0 @@
 | 
			
		||||
/*
 | 
			
		||||
Copyright 2020 The Kubernetes Authors.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
you may not use this file except in compliance with the License.
 | 
			
		||||
You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
    http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
See the License for the specific language governing permissions and
 | 
			
		||||
limitations under the License.
 | 
			
		||||
*/
 | 
			
		||||
 | 
			
		||||
// Package diskclient implements the client for Disks.
 | 
			
		||||
package diskclient // import "k8s.io/legacy-cloud-providers/azure/clients/diskclient"
 | 
			
		||||
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user