get rid of vendor as it gives gitea a lot of hiccups
This commit is contained in:
parent
f3160916d7
commit
294dd103ed
4776 changed files with 0 additions and 1766100 deletions
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
202
vendor/cloud.google.com/go/compute/LICENSE
generated
vendored
202
vendor/cloud.google.com/go/compute/LICENSE
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
18
vendor/cloud.google.com/go/compute/internal/version.go
generated
vendored
18
vendor/cloud.google.com/go/compute/internal/version.go
generated
vendored
|
|
@ -1,18 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
// Version is the current tagged release of the library.
|
|
||||||
const Version = "1.14.0"
|
|
||||||
19
vendor/cloud.google.com/go/compute/metadata/CHANGES.md
generated
vendored
19
vendor/cloud.google.com/go/compute/metadata/CHANGES.md
generated
vendored
|
|
@ -1,19 +0,0 @@
|
||||||
# Changes
|
|
||||||
|
|
||||||
## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165)
|
|
||||||
|
|
||||||
## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)
|
|
||||||
|
|
||||||
## [0.1.0] (2022-10-26)
|
|
||||||
|
|
||||||
Initial release of metadata being it's own module.
|
|
||||||
202
vendor/cloud.google.com/go/compute/metadata/LICENSE
generated
vendored
202
vendor/cloud.google.com/go/compute/metadata/LICENSE
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
27
vendor/cloud.google.com/go/compute/metadata/README.md
generated
vendored
27
vendor/cloud.google.com/go/compute/metadata/README.md
generated
vendored
|
|
@ -1,27 +0,0 @@
|
||||||
# Compute API
|
|
||||||
|
|
||||||
[](https://pkg.go.dev/cloud.google.com/go/compute/metadata)
|
|
||||||
|
|
||||||
This is a utility library for communicating with Google Cloud metadata service
|
|
||||||
on Google Cloud.
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get cloud.google.com/go/compute/metadata
|
|
||||||
```
|
|
||||||
|
|
||||||
## Go Version Support
|
|
||||||
|
|
||||||
See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
|
|
||||||
section in the root directory's README.
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
|
|
||||||
document for details.
|
|
||||||
|
|
||||||
Please note that this project is released with a Contributor Code of Conduct.
|
|
||||||
By participating in this project you agree to abide by its terms. See
|
|
||||||
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
|
|
||||||
for more information.
|
|
||||||
543
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
543
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
|
|
@ -1,543 +0,0 @@
|
||||||
// Copyright 2014 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package metadata provides access to Google Compute Engine (GCE)
|
|
||||||
// metadata and API service accounts.
|
|
||||||
//
|
|
||||||
// This package is a wrapper around the GCE metadata service,
|
|
||||||
// as documented at https://cloud.google.com/compute/docs/metadata/overview.
|
|
||||||
package metadata // import "cloud.google.com/go/compute/metadata"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// metadataIP is the documented metadata server IP address.
|
|
||||||
metadataIP = "169.254.169.254"
|
|
||||||
|
|
||||||
// metadataHostEnv is the environment variable specifying the
|
|
||||||
// GCE metadata hostname. If empty, the default value of
|
|
||||||
// metadataIP ("169.254.169.254") is used instead.
|
|
||||||
// This is variable name is not defined by any spec, as far as
|
|
||||||
// I know; it was made up for the Go package.
|
|
||||||
metadataHostEnv = "GCE_METADATA_HOST"
|
|
||||||
|
|
||||||
userAgent = "gcloud-golang/0.1"
|
|
||||||
)
|
|
||||||
|
|
||||||
type cachedValue struct {
|
|
||||||
k string
|
|
||||||
trim bool
|
|
||||||
mu sync.Mutex
|
|
||||||
v string
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
projID = &cachedValue{k: "project/project-id", trim: true}
|
|
||||||
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
|
||||||
instID = &cachedValue{k: "instance/id", trim: true}
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultClient = &Client{hc: newDefaultHTTPClient()}
|
|
||||||
|
|
||||||
func newDefaultHTTPClient() *http.Client {
|
|
||||||
return &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Dial: (&net.Dialer{
|
|
||||||
Timeout: 2 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
}).Dial,
|
|
||||||
IdleConnTimeout: 60 * time.Second,
|
|
||||||
},
|
|
||||||
Timeout: 5 * time.Second,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NotDefinedError is returned when requested metadata is not defined.
|
|
||||||
//
|
|
||||||
// The underlying string is the suffix after "/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// This error is not returned if the value is defined to be the empty
|
|
||||||
// string.
|
|
||||||
type NotDefinedError string
|
|
||||||
|
|
||||||
func (suffix NotDefinedError) Error() string {
|
|
||||||
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
|
||||||
defer c.mu.Unlock()
|
|
||||||
c.mu.Lock()
|
|
||||||
if c.v != "" {
|
|
||||||
return c.v, nil
|
|
||||||
}
|
|
||||||
if c.trim {
|
|
||||||
v, err = cl.getTrimmed(c.k)
|
|
||||||
} else {
|
|
||||||
v, err = cl.Get(c.k)
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
c.v = v
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
onGCEOnce sync.Once
|
|
||||||
onGCE bool
|
|
||||||
)
|
|
||||||
|
|
||||||
// OnGCE reports whether this process is running on Google Compute Engine.
|
|
||||||
func OnGCE() bool {
|
|
||||||
onGCEOnce.Do(initOnGCE)
|
|
||||||
return onGCE
|
|
||||||
}
|
|
||||||
|
|
||||||
func initOnGCE() {
|
|
||||||
onGCE = testOnGCE()
|
|
||||||
}
|
|
||||||
|
|
||||||
func testOnGCE() bool {
|
|
||||||
// The user explicitly said they're on GCE, so trust them.
|
|
||||||
if os.Getenv(metadataHostEnv) != "" {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
resc := make(chan bool, 2)
|
|
||||||
|
|
||||||
// Try two strategies in parallel.
|
|
||||||
// See https://github.com/googleapis/google-cloud-go/issues/194
|
|
||||||
go func() {
|
|
||||||
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
|
||||||
req.Header.Set("User-Agent", userAgent)
|
|
||||||
res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
resc <- false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
resolver := &net.Resolver{}
|
|
||||||
addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
|
|
||||||
if err != nil || len(addrs) == 0 {
|
|
||||||
resc <- false
|
|
||||||
return
|
|
||||||
}
|
|
||||||
resc <- strsContains(addrs, metadataIP)
|
|
||||||
}()
|
|
||||||
|
|
||||||
tryHarder := systemInfoSuggestsGCE()
|
|
||||||
if tryHarder {
|
|
||||||
res := <-resc
|
|
||||||
if res {
|
|
||||||
// The first strategy succeeded, so let's use it.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Wait for either the DNS or metadata server probe to
|
|
||||||
// contradict the other one and say we are running on
|
|
||||||
// GCE. Give it a lot of time to do so, since the system
|
|
||||||
// info already suggests we're running on a GCE BIOS.
|
|
||||||
timer := time.NewTimer(5 * time.Second)
|
|
||||||
defer timer.Stop()
|
|
||||||
select {
|
|
||||||
case res = <-resc:
|
|
||||||
return res
|
|
||||||
case <-timer.C:
|
|
||||||
// Too slow. Who knows what this system is.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// There's no hint from the system info that we're running on
|
|
||||||
// GCE, so use the first probe's result as truth, whether it's
|
|
||||||
// true or false. The goal here is to optimize for speed for
|
|
||||||
// users who are NOT running on GCE. We can't assume that
|
|
||||||
// either a DNS lookup or an HTTP request to a blackholed IP
|
|
||||||
// address is fast. Worst case this should return when the
|
|
||||||
// metaClient's Transport.ResponseHeaderTimeout or
|
|
||||||
// Transport.Dial.Timeout fires (in two seconds).
|
|
||||||
return <-resc
|
|
||||||
}
|
|
||||||
|
|
||||||
// systemInfoSuggestsGCE reports whether the local system (without
|
|
||||||
// doing network requests) suggests that we're running on GCE. If this
|
|
||||||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
|
||||||
// server.
|
|
||||||
func systemInfoSuggestsGCE() bool {
|
|
||||||
if runtime.GOOS != "linux" {
|
|
||||||
// We don't have any non-Linux clues available, at least yet.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
|
||||||
name := strings.TrimSpace(string(slurp))
|
|
||||||
return name == "Google" || name == "Google Compute Engine"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe calls Client.Subscribe on the default client.
|
|
||||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
|
||||||
return defaultClient.Subscribe(suffix, fn)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get calls Client.Get on the default client.
|
|
||||||
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
|
||||||
|
|
||||||
// ProjectID returns the current instance's project ID string.
|
|
||||||
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
|
||||||
|
|
||||||
// NumericProjectID returns the current instance's numeric project ID.
|
|
||||||
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
|
||||||
|
|
||||||
// InternalIP returns the instance's primary internal IP address.
|
|
||||||
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
|
||||||
|
|
||||||
// ExternalIP returns the instance's primary external (public) IP address.
|
|
||||||
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
|
||||||
|
|
||||||
// Email calls Client.Email on the default client.
|
|
||||||
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
|
|
||||||
|
|
||||||
// Hostname returns the instance's hostname. This will be of the form
|
|
||||||
// "<instanceID>.c.<projID>.internal".
|
|
||||||
func Hostname() (string, error) { return defaultClient.Hostname() }
|
|
||||||
|
|
||||||
// InstanceTags returns the list of user-defined instance tags,
|
|
||||||
// assigned when initially creating a GCE instance.
|
|
||||||
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
|
||||||
|
|
||||||
// InstanceID returns the current VM's numeric instance ID.
|
|
||||||
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
|
||||||
|
|
||||||
// InstanceName returns the current VM's instance ID string.
|
|
||||||
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
|
||||||
|
|
||||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|
||||||
func Zone() (string, error) { return defaultClient.Zone() }
|
|
||||||
|
|
||||||
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
|
||||||
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
|
||||||
|
|
||||||
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
|
||||||
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
|
||||||
|
|
||||||
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
|
||||||
func InstanceAttributeValue(attr string) (string, error) {
|
|
||||||
return defaultClient.InstanceAttributeValue(attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
|
||||||
func ProjectAttributeValue(attr string) (string, error) {
|
|
||||||
return defaultClient.ProjectAttributeValue(attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scopes calls Client.Scopes on the default client.
|
|
||||||
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
|
||||||
|
|
||||||
func strsContains(ss []string, s string) bool {
|
|
||||||
for _, v := range ss {
|
|
||||||
if v == s {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Client provides metadata.
|
|
||||||
type Client struct {
|
|
||||||
hc *http.Client
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClient returns a Client that can be used to fetch metadata.
|
|
||||||
// Returns the client that uses the specified http.Client for HTTP requests.
|
|
||||||
// If nil is specified, returns the default client.
|
|
||||||
func NewClient(c *http.Client) *Client {
|
|
||||||
if c == nil {
|
|
||||||
return defaultClient
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Client{hc: c}
|
|
||||||
}
|
|
||||||
|
|
||||||
// getETag returns a value from the metadata service as well as the associated ETag.
|
|
||||||
// This func is otherwise equivalent to Get.
|
|
||||||
func (c *Client) getETag(suffix string) (value, etag string, err error) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
// Using a fixed IP makes it very difficult to spoof the metadata service in
|
|
||||||
// a container, which is an important use-case for local testing of cloud
|
|
||||||
// deployments. To enable spoofing of the metadata service, the environment
|
|
||||||
// variable GCE_METADATA_HOST is first inspected to decide where metadata
|
|
||||||
// requests shall go.
|
|
||||||
host := os.Getenv(metadataHostEnv)
|
|
||||||
if host == "" {
|
|
||||||
// Using 169.254.169.254 instead of "metadata" here because Go
|
|
||||||
// binaries built with the "netgo" tag and without cgo won't
|
|
||||||
// know the search suffix for "metadata" is
|
|
||||||
// ".google.internal", and this IP address is documented as
|
|
||||||
// being stable anyway.
|
|
||||||
host = metadataIP
|
|
||||||
}
|
|
||||||
suffix = strings.TrimLeft(suffix, "/")
|
|
||||||
u := "http://" + host + "/computeMetadata/v1/" + suffix
|
|
||||||
req, err := http.NewRequest("GET", u, nil)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
req.Header.Set("Metadata-Flavor", "Google")
|
|
||||||
req.Header.Set("User-Agent", userAgent)
|
|
||||||
var res *http.Response
|
|
||||||
var reqErr error
|
|
||||||
retryer := newRetryer()
|
|
||||||
for {
|
|
||||||
res, reqErr = c.hc.Do(req)
|
|
||||||
var code int
|
|
||||||
if res != nil {
|
|
||||||
code = res.StatusCode
|
|
||||||
}
|
|
||||||
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
|
|
||||||
if err := sleep(ctx, delay); err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if reqErr != nil {
|
|
||||||
return "", "", reqErr
|
|
||||||
}
|
|
||||||
defer res.Body.Close()
|
|
||||||
if res.StatusCode == http.StatusNotFound {
|
|
||||||
return "", "", NotDefinedError(suffix)
|
|
||||||
}
|
|
||||||
all, err := ioutil.ReadAll(res.Body)
|
|
||||||
if err != nil {
|
|
||||||
return "", "", err
|
|
||||||
}
|
|
||||||
if res.StatusCode != 200 {
|
|
||||||
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
|
|
||||||
}
|
|
||||||
return string(all), res.Header.Get("Etag"), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
//
|
|
||||||
// If the GCE_METADATA_HOST environment variable is not defined, a default of
|
|
||||||
// 169.254.169.254 will be used instead.
|
|
||||||
//
|
|
||||||
// If the requested metadata is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
func (c *Client) Get(suffix string) (string, error) {
|
|
||||||
val, _, err := c.getETag(suffix)
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) getTrimmed(suffix string) (s string, err error) {
|
|
||||||
s, err = c.Get(suffix)
|
|
||||||
s = strings.TrimSpace(s)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) lines(suffix string) ([]string, error) {
|
|
||||||
j, err := c.Get(suffix)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
|
||||||
for i := range s {
|
|
||||||
s[i] = strings.TrimSpace(s[i])
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectID returns the current instance's project ID string.
|
|
||||||
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
|
|
||||||
|
|
||||||
// NumericProjectID returns the current instance's numeric project ID.
|
|
||||||
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
|
|
||||||
|
|
||||||
// InstanceID returns the current VM's numeric instance ID.
|
|
||||||
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
|
|
||||||
|
|
||||||
// InternalIP returns the instance's primary internal IP address.
|
|
||||||
func (c *Client) InternalIP() (string, error) {
|
|
||||||
return c.getTrimmed("instance/network-interfaces/0/ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Email returns the email address associated with the service account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func (c *Client) Email(serviceAccount string) (string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExternalIP returns the instance's primary external (public) IP address.
|
|
||||||
func (c *Client) ExternalIP() (string, error) {
|
|
||||||
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hostname returns the instance's hostname. This will be of the form
|
|
||||||
// "<instanceID>.c.<projID>.internal".
|
|
||||||
func (c *Client) Hostname() (string, error) {
|
|
||||||
return c.getTrimmed("instance/hostname")
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceTags returns the list of user-defined instance tags,
|
|
||||||
// assigned when initially creating a GCE instance.
|
|
||||||
func (c *Client) InstanceTags() ([]string, error) {
|
|
||||||
var s []string
|
|
||||||
j, err := c.Get("instance/tags")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceName returns the current VM's instance ID string.
|
|
||||||
func (c *Client) InstanceName() (string, error) {
|
|
||||||
return c.getTrimmed("instance/name")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
|
||||||
func (c *Client) Zone() (string, error) {
|
|
||||||
zone, err := c.getTrimmed("instance/zone")
|
|
||||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// InstanceAttributes returns the list of user-defined attributes,
|
|
||||||
// assigned when initially creating a GCE VM instance. The value of an
|
|
||||||
// attribute can be obtained with InstanceAttributeValue.
|
|
||||||
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
|
|
||||||
|
|
||||||
// ProjectAttributes returns the list of user-defined attributes
|
|
||||||
// applying to the project as a whole, not just this VM. The value of
|
|
||||||
// an attribute can be obtained with ProjectAttributeValue.
|
|
||||||
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
|
|
||||||
|
|
||||||
// InstanceAttributeValue returns the value of the provided VM
|
|
||||||
// instance attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
|
|
||||||
return c.Get("instance/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectAttributeValue returns the value of the provided
|
|
||||||
// project attribute.
|
|
||||||
//
|
|
||||||
// If the requested attribute is not defined, the returned error will
|
|
||||||
// be of type NotDefinedError.
|
|
||||||
//
|
|
||||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
|
||||||
// defined to be the empty string.
|
|
||||||
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
|
|
||||||
return c.Get("project/attributes/" + attr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scopes returns the service account scopes for the given account.
|
|
||||||
// The account may be empty or the string "default" to use the instance's
|
|
||||||
// main account.
|
|
||||||
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
|
|
||||||
if serviceAccount == "" {
|
|
||||||
serviceAccount = "default"
|
|
||||||
}
|
|
||||||
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subscribe subscribes to a value from the metadata service.
|
|
||||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
|
||||||
// The suffix may contain query parameters.
|
|
||||||
//
|
|
||||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
|
||||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
|
||||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
|
||||||
// is deleted. Subscribe returns the error value returned from the last call to
|
|
||||||
// fn, which may be nil when ok == false.
|
|
||||||
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
|
||||||
const failedSubscribeSleep = time.Second * 5
|
|
||||||
|
|
||||||
// First check to see if the metadata value exists at all.
|
|
||||||
val, lastETag, err := c.getETag(suffix)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fn(val, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ok := true
|
|
||||||
if strings.ContainsRune(suffix, '?') {
|
|
||||||
suffix += "&wait_for_change=true&last_etag="
|
|
||||||
} else {
|
|
||||||
suffix += "?wait_for_change=true&last_etag="
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
|
|
||||||
if err != nil {
|
|
||||||
if _, deleted := err.(NotDefinedError); !deleted {
|
|
||||||
time.Sleep(failedSubscribeSleep)
|
|
||||||
continue // Retry on other errors.
|
|
||||||
}
|
|
||||||
ok = false
|
|
||||||
}
|
|
||||||
lastETag = etag
|
|
||||||
|
|
||||||
if err := fn(val, ok); err != nil || !ok {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error contains an error response from the server.
|
|
||||||
type Error struct {
|
|
||||||
// Code is the HTTP response status code.
|
|
||||||
Code int
|
|
||||||
// Message is the server response message.
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Error) Error() string {
|
|
||||||
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
|
|
||||||
}
|
|
||||||
114
vendor/cloud.google.com/go/compute/metadata/retry.go
generated
vendored
114
vendor/cloud.google.com/go/compute/metadata/retry.go
generated
vendored
|
|
@ -1,114 +0,0 @@
|
||||||
// Copyright 2021 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package metadata
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"math/rand"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxRetryAttempts = 5
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
syscallRetryable = func(err error) bool { return false }
|
|
||||||
)
|
|
||||||
|
|
||||||
// defaultBackoff is basically equivalent to gax.Backoff without the need for
|
|
||||||
// the dependency.
|
|
||||||
type defaultBackoff struct {
|
|
||||||
max time.Duration
|
|
||||||
mul float64
|
|
||||||
cur time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *defaultBackoff) Pause() time.Duration {
|
|
||||||
d := time.Duration(1 + rand.Int63n(int64(b.cur)))
|
|
||||||
b.cur = time.Duration(float64(b.cur) * b.mul)
|
|
||||||
if b.cur > b.max {
|
|
||||||
b.cur = b.max
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// sleep is the equivalent of gax.Sleep without the need for the dependency.
|
|
||||||
func sleep(ctx context.Context, d time.Duration) error {
|
|
||||||
t := time.NewTimer(d)
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
t.Stop()
|
|
||||||
return ctx.Err()
|
|
||||||
case <-t.C:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRetryer() *metadataRetryer {
|
|
||||||
return &metadataRetryer{bo: &defaultBackoff{
|
|
||||||
cur: 100 * time.Millisecond,
|
|
||||||
max: 30 * time.Second,
|
|
||||||
mul: 2,
|
|
||||||
}}
|
|
||||||
}
|
|
||||||
|
|
||||||
type backoff interface {
|
|
||||||
Pause() time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
type metadataRetryer struct {
|
|
||||||
bo backoff
|
|
||||||
attempts int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) {
|
|
||||||
if status == http.StatusOK {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
retryOk := shouldRetry(status, err)
|
|
||||||
if !retryOk {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
if r.attempts == maxRetryAttempts {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
r.attempts++
|
|
||||||
return r.bo.Pause(), true
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldRetry(status int, err error) bool {
|
|
||||||
if 500 <= status && status <= 599 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err == io.ErrUnexpectedEOF {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Transient network errors should be retried.
|
|
||||||
if syscallRetryable(err) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err, ok := err.(interface{ Temporary() bool }); ok {
|
|
||||||
if err.Temporary() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err, ok := err.(interface{ Unwrap() error }); ok {
|
|
||||||
return shouldRetry(status, err.Unwrap())
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
26
vendor/cloud.google.com/go/compute/metadata/retry_linux.go
generated
vendored
26
vendor/cloud.google.com/go/compute/metadata/retry_linux.go
generated
vendored
|
|
@ -1,26 +0,0 @@
|
||||||
// Copyright 2021 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//go:build linux
|
|
||||||
// +build linux
|
|
||||||
|
|
||||||
package metadata
|
|
||||||
|
|
||||||
import "syscall"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Initialize syscallRetryable to return true on transient socket-level
|
|
||||||
// errors. These errors are specific to Linux.
|
|
||||||
syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
|
|
||||||
}
|
|
||||||
23
vendor/cloud.google.com/go/compute/metadata/tidyfix.go
generated
vendored
23
vendor/cloud.google.com/go/compute/metadata/tidyfix.go
generated
vendored
|
|
@ -1,23 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// This file, and the {{.RootMod}} import, won't actually become part of
|
|
||||||
// the resultant binary.
|
|
||||||
//go:build modhack
|
|
||||||
// +build modhack
|
|
||||||
|
|
||||||
package metadata
|
|
||||||
|
|
||||||
// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
|
|
||||||
import _ "cloud.google.com/go/compute/internal"
|
|
||||||
62
vendor/cloud.google.com/go/iam/CHANGES.md
generated
vendored
62
vendor/cloud.google.com/go/iam/CHANGES.md
generated
vendored
|
|
@ -1,62 +0,0 @@
|
||||||
# Changes
|
|
||||||
|
|
||||||
## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38))
|
|
||||||
|
|
||||||
## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **iam:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad))
|
|
||||||
|
|
||||||
## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.5.0...iam/v0.6.0) (2022-10-25)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **iam:** start generating stubs dir ([de2d180](https://github.com/googleapis/google-cloud-go/commit/de2d18066dc613b72f6f8db93ca60146dabcfdcc))
|
|
||||||
|
|
||||||
## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7))
|
|
||||||
|
|
||||||
## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **iam:** start generating apiv2 ([#6605](https://github.com/googleapis/google-cloud-go/issues/6605)) ([a6004e7](https://github.com/googleapis/google-cloud-go/commit/a6004e762f782869cd85688937475744f7b17e50))
|
|
||||||
|
|
||||||
## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.2.0...iam/v0.3.0) (2022-02-23)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **iam:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
|
|
||||||
|
|
||||||
## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e))
|
|
||||||
|
|
||||||
### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **iam:** run formatter ([#5277](https://www.github.com/googleapis/google-cloud-go/issues/5277)) ([8682e4e](https://www.github.com/googleapis/google-cloud-go/commit/8682e4ed57a4428a659fbc225f56c91767e2a4a9))
|
|
||||||
|
|
||||||
## v0.1.0
|
|
||||||
|
|
||||||
This is the first tag to carve out iam as its own module. See
|
|
||||||
[Add a module to a multi-module repository](https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository).
|
|
||||||
202
vendor/cloud.google.com/go/iam/LICENSE
generated
vendored
202
vendor/cloud.google.com/go/iam/LICENSE
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
40
vendor/cloud.google.com/go/iam/README.md
generated
vendored
40
vendor/cloud.google.com/go/iam/README.md
generated
vendored
|
|
@ -1,40 +0,0 @@
|
||||||
# IAM API
|
|
||||||
|
|
||||||
[](https://pkg.go.dev/cloud.google.com/go/iam)
|
|
||||||
|
|
||||||
Go Client Library for IAM API.
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get cloud.google.com/go/iam
|
|
||||||
```
|
|
||||||
|
|
||||||
## Stability
|
|
||||||
|
|
||||||
The stability of this module is indicated by SemVer.
|
|
||||||
|
|
||||||
However, a `v1+` module may have breaking changes in two scenarios:
|
|
||||||
|
|
||||||
* Packages with `alpha` or `beta` in the import path
|
|
||||||
* The GoDoc has an explicit stability disclaimer (for example, for an experimental feature).
|
|
||||||
|
|
||||||
## Go Version Support
|
|
||||||
|
|
||||||
See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
|
|
||||||
section in the root directory's README.
|
|
||||||
|
|
||||||
## Authorization
|
|
||||||
|
|
||||||
See the [Authorization](https://github.com/googleapis/google-cloud-go#authorization)
|
|
||||||
section in the root directory's README.
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
|
|
||||||
document for details.
|
|
||||||
|
|
||||||
Please note that this project is released with a Contributor Code of Conduct.
|
|
||||||
By participating in this project you agree to abide by its terms. See
|
|
||||||
[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
|
|
||||||
for more information.
|
|
||||||
672
vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
generated
vendored
672
vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
generated
vendored
|
|
@ -1,672 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// versions:
|
|
||||||
// protoc-gen-go v1.26.0
|
|
||||||
// protoc v3.21.5
|
|
||||||
// source: google/iam/v1/iam_policy.proto
|
|
||||||
|
|
||||||
package iampb
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
reflect "reflect"
|
|
||||||
sync "sync"
|
|
||||||
|
|
||||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
|
||||||
grpc "google.golang.org/grpc"
|
|
||||||
codes "google.golang.org/grpc/codes"
|
|
||||||
status "google.golang.org/grpc/status"
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Verify that this generated code is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
|
||||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Request message for `SetIamPolicy` method.
|
|
||||||
type SetIamPolicyRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// REQUIRED: The resource for which the policy is being specified.
|
|
||||||
// See the operation documentation for the appropriate value for this field.
|
|
||||||
Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
|
|
||||||
// REQUIRED: The complete policy to be applied to the `resource`. The size of
|
|
||||||
// the policy is limited to a few 10s of KB. An empty policy is a
|
|
||||||
// valid policy but certain Cloud Platform services (such as Projects)
|
|
||||||
// might reject them.
|
|
||||||
Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
|
|
||||||
// OPTIONAL: A FieldMask specifying which fields of the policy to modify. Only
|
|
||||||
// the fields in the mask will be modified. If no mask is provided, the
|
|
||||||
// following default mask is used:
|
|
||||||
//
|
|
||||||
// `paths: "bindings, etag"`
|
|
||||||
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SetIamPolicyRequest) Reset() {
|
|
||||||
*x = SetIamPolicyRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SetIamPolicyRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*SetIamPolicyRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SetIamPolicyRequest) GetResource() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Resource
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SetIamPolicyRequest) GetPolicy() *Policy {
|
|
||||||
if x != nil {
|
|
||||||
return x.Policy
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SetIamPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
|
|
||||||
if x != nil {
|
|
||||||
return x.UpdateMask
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request message for `GetIamPolicy` method.
|
|
||||||
type GetIamPolicyRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// REQUIRED: The resource for which the policy is being requested.
|
|
||||||
// See the operation documentation for the appropriate value for this field.
|
|
||||||
Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
|
|
||||||
// OPTIONAL: A `GetPolicyOptions` object for specifying options to
|
|
||||||
// `GetIamPolicy`.
|
|
||||||
Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GetIamPolicyRequest) Reset() {
|
|
||||||
*x = GetIamPolicyRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GetIamPolicyRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*GetIamPolicyRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GetIamPolicyRequest) GetResource() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Resource
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions {
|
|
||||||
if x != nil {
|
|
||||||
return x.Options
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request message for `TestIamPermissions` method.
|
|
||||||
type TestIamPermissionsRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// REQUIRED: The resource for which the policy detail is being requested.
|
|
||||||
// See the operation documentation for the appropriate value for this field.
|
|
||||||
Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
|
|
||||||
// The set of permissions to check for the `resource`. Permissions with
|
|
||||||
// wildcards (such as '*' or 'storage.*') are not allowed. For more
|
|
||||||
// information see
|
|
||||||
// [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
|
|
||||||
Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsRequest) Reset() {
|
|
||||||
*x = TestIamPermissionsRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*TestIamPermissionsRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsRequest) GetResource() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Resource
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsRequest) GetPermissions() []string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Permissions
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Response message for `TestIamPermissions` method.
|
|
||||||
type TestIamPermissionsResponse struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// A subset of `TestPermissionsRequest.permissions` that the caller is
|
|
||||||
// allowed.
|
|
||||||
Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsResponse) Reset() {
|
|
||||||
*x = TestIamPermissionsResponse{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsResponse) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*TestIamPermissionsResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead.
|
|
||||||
func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *TestIamPermissionsResponse) GetPermissions() []string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Permissions
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
|
|
||||||
0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
|
||||||
0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a,
|
|
||||||
0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
|
|
||||||
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
|
|
||||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61,
|
|
||||||
0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
|
|
||||||
0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
|
||||||
0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
|
||||||
0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76,
|
|
||||||
0x31, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
|
|
||||||
0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70,
|
|
||||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
|
|
||||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65,
|
|
||||||
0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01,
|
|
||||||
0x0a, 0x13, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
|
|
||||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
|
|
||||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a,
|
|
||||||
0x01, 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06,
|
|
||||||
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c,
|
|
||||||
0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
|
|
||||||
0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
|
|
||||||
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
|
||||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
|
|
||||||
0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x77, 0x0a,
|
|
||||||
0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71,
|
|
||||||
0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
|
|
||||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01,
|
|
||||||
0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f,
|
|
||||||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74,
|
|
||||||
0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
|
|
||||||
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61,
|
|
||||||
0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75,
|
|
||||||
0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
|
|
||||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a,
|
|
||||||
0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65,
|
|
||||||
0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42,
|
|
||||||
0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
|
|
||||||
0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
|
|
||||||
0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
|
|
||||||
0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
|
|
||||||
0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
|
|
||||||
0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
|
||||||
0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
|
|
||||||
0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e,
|
|
||||||
0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
|
|
||||||
0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d,
|
|
||||||
0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93,
|
|
||||||
0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
|
|
||||||
0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
|
|
||||||
0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50,
|
|
||||||
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69,
|
|
||||||
0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
|
|
||||||
0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
|
||||||
0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
|
|
||||||
0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72,
|
|
||||||
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49,
|
|
||||||
0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12,
|
|
||||||
0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f,
|
|
||||||
0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
|
|
||||||
0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
|
|
||||||
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73,
|
|
||||||
0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52,
|
|
||||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22,
|
|
||||||
0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a,
|
|
||||||
0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
|
|
||||||
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d,
|
|
||||||
0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
|
||||||
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d,
|
|
||||||
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e,
|
|
||||||
0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
|
||||||
0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
|
|
||||||
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
|
|
||||||
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69,
|
|
||||||
0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
|
|
||||||
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
|
|
||||||
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
|
|
||||||
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once
|
|
||||||
file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc
|
|
||||||
)
|
|
||||||
|
|
||||||
func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte {
|
|
||||||
file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() {
|
|
||||||
file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData)
|
|
||||||
})
|
|
||||||
return file_google_iam_v1_iam_policy_proto_rawDescData
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
|
||||||
var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{
|
|
||||||
(*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest
|
|
||||||
(*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest
|
|
||||||
(*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest
|
|
||||||
(*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse
|
|
||||||
(*Policy)(nil), // 4: google.iam.v1.Policy
|
|
||||||
(*fieldmaskpb.FieldMask)(nil), // 5: google.protobuf.FieldMask
|
|
||||||
(*GetPolicyOptions)(nil), // 6: google.iam.v1.GetPolicyOptions
|
|
||||||
}
|
|
||||||
var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{
|
|
||||||
4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy
|
|
||||||
5, // 1: google.iam.v1.SetIamPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask
|
|
||||||
6, // 2: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions
|
|
||||||
0, // 3: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
|
|
||||||
1, // 4: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
|
|
||||||
2, // 5: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
|
|
||||||
4, // 6: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy
|
|
||||||
4, // 7: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy
|
|
||||||
3, // 8: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
|
|
||||||
6, // [6:9] is the sub-list for method output_type
|
|
||||||
3, // [3:6] is the sub-list for method input_type
|
|
||||||
3, // [3:3] is the sub-list for extension type_name
|
|
||||||
3, // [3:3] is the sub-list for extension extendee
|
|
||||||
0, // [0:3] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_google_iam_v1_iam_policy_proto_init() }
|
|
||||||
func file_google_iam_v1_iam_policy_proto_init() {
|
|
||||||
if File_google_iam_v1_iam_policy_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
file_google_iam_v1_options_proto_init()
|
|
||||||
file_google_iam_v1_policy_proto_init()
|
|
||||||
if !protoimpl.UnsafeEnabled {
|
|
||||||
file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*SetIamPolicyRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*GetIamPolicyRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*TestIamPermissionsRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*TestIamPermissionsResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 4,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 1,
|
|
||||||
},
|
|
||||||
GoTypes: file_google_iam_v1_iam_policy_proto_goTypes,
|
|
||||||
DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs,
|
|
||||||
MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes,
|
|
||||||
}.Build()
|
|
||||||
File_google_iam_v1_iam_policy_proto = out.File
|
|
||||||
file_google_iam_v1_iam_policy_proto_rawDesc = nil
|
|
||||||
file_google_iam_v1_iam_policy_proto_goTypes = nil
|
|
||||||
file_google_iam_v1_iam_policy_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ context.Context
|
|
||||||
var _ grpc.ClientConnInterface
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the grpc package it is being compiled against.
|
|
||||||
const _ = grpc.SupportPackageIsVersion6
|
|
||||||
|
|
||||||
// IAMPolicyClient is the client API for IAMPolicy service.
|
|
||||||
//
|
|
||||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
|
||||||
type IAMPolicyClient interface {
|
|
||||||
// Sets the access control policy on the specified resource. Replaces any
|
|
||||||
// existing policy.
|
|
||||||
//
|
|
||||||
// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
|
|
||||||
SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
|
|
||||||
// Gets the access control policy for a resource.
|
|
||||||
// Returns an empty policy if the resource exists and does not have a policy
|
|
||||||
// set.
|
|
||||||
GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error)
|
|
||||||
// Returns permissions that a caller has on the specified resource.
|
|
||||||
// If the resource does not exist, this will return an empty set of
|
|
||||||
// permissions, not a `NOT_FOUND` error.
|
|
||||||
//
|
|
||||||
// Note: This operation is designed to be used for building permission-aware
|
|
||||||
// UIs and command-line tools, not for authorization checking. This operation
|
|
||||||
// may "fail open" without warning.
|
|
||||||
TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type iAMPolicyClient struct {
|
|
||||||
cc grpc.ClientConnInterface
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient {
|
|
||||||
return &iAMPolicyClient{cc}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iAMPolicyClient) SetIamPolicy(ctx context.Context, in *SetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
|
|
||||||
out := new(Policy)
|
|
||||||
err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/SetIamPolicy", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iAMPolicyClient) GetIamPolicy(ctx context.Context, in *GetIamPolicyRequest, opts ...grpc.CallOption) (*Policy, error) {
|
|
||||||
out := new(Policy)
|
|
||||||
err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/GetIamPolicy", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iAMPolicyClient) TestIamPermissions(ctx context.Context, in *TestIamPermissionsRequest, opts ...grpc.CallOption) (*TestIamPermissionsResponse, error) {
|
|
||||||
out := new(TestIamPermissionsResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/google.iam.v1.IAMPolicy/TestIamPermissions", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IAMPolicyServer is the server API for IAMPolicy service.
|
|
||||||
type IAMPolicyServer interface {
|
|
||||||
// Sets the access control policy on the specified resource. Replaces any
|
|
||||||
// existing policy.
|
|
||||||
//
|
|
||||||
// Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
|
|
||||||
SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error)
|
|
||||||
// Gets the access control policy for a resource.
|
|
||||||
// Returns an empty policy if the resource exists and does not have a policy
|
|
||||||
// set.
|
|
||||||
GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error)
|
|
||||||
// Returns permissions that a caller has on the specified resource.
|
|
||||||
// If the resource does not exist, this will return an empty set of
|
|
||||||
// permissions, not a `NOT_FOUND` error.
|
|
||||||
//
|
|
||||||
// Note: This operation is designed to be used for building permission-aware
|
|
||||||
// UIs and command-line tools, not for authorization checking. This operation
|
|
||||||
// may "fail open" without warning.
|
|
||||||
TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations.
|
|
||||||
type UnimplementedIAMPolicyServer struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
|
|
||||||
}
|
|
||||||
func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
|
|
||||||
}
|
|
||||||
func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
|
|
||||||
s.RegisterService(&_IAMPolicy_serviceDesc, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _IAMPolicy_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(SetIamPolicyRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(IAMPolicyServer).SetIamPolicy(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/google.iam.v1.IAMPolicy/SetIamPolicy",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(IAMPolicyServer).SetIamPolicy(ctx, req.(*SetIamPolicyRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _IAMPolicy_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(GetIamPolicyRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(IAMPolicyServer).GetIamPolicy(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/google.iam.v1.IAMPolicy/GetIamPolicy",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(IAMPolicyServer).GetIamPolicy(ctx, req.(*GetIamPolicyRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _IAMPolicy_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(TestIamPermissionsRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(IAMPolicyServer).TestIamPermissions(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/google.iam.v1.IAMPolicy/TestIamPermissions",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(IAMPolicyServer).TestIamPermissions(ctx, req.(*TestIamPermissionsRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _IAMPolicy_serviceDesc = grpc.ServiceDesc{
|
|
||||||
ServiceName: "google.iam.v1.IAMPolicy",
|
|
||||||
HandlerType: (*IAMPolicyServer)(nil),
|
|
||||||
Methods: []grpc.MethodDesc{
|
|
||||||
{
|
|
||||||
MethodName: "SetIamPolicy",
|
|
||||||
Handler: _IAMPolicy_SetIamPolicy_Handler,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
MethodName: "GetIamPolicy",
|
|
||||||
Handler: _IAMPolicy_GetIamPolicy_Handler,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
MethodName: "TestIamPermissions",
|
|
||||||
Handler: _IAMPolicy_TestIamPermissions_Handler,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Streams: []grpc.StreamDesc{},
|
|
||||||
Metadata: "google/iam/v1/iam_policy.proto",
|
|
||||||
}
|
|
||||||
187
vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
generated
vendored
187
vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
generated
vendored
|
|
@ -1,187 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// versions:
|
|
||||||
// protoc-gen-go v1.26.0
|
|
||||||
// protoc v3.21.5
|
|
||||||
// source: google/iam/v1/options.proto
|
|
||||||
|
|
||||||
package iampb
|
|
||||||
|
|
||||||
import (
|
|
||||||
reflect "reflect"
|
|
||||||
sync "sync"
|
|
||||||
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Verify that this generated code is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
|
||||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Encapsulates settings provided to GetIamPolicy.
|
|
||||||
type GetPolicyOptions struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// Optional. The maximum policy version that will be used to format the
|
|
||||||
// policy.
|
|
||||||
//
|
|
||||||
// Valid values are 0, 1, and 3. Requests specifying an invalid value will be
|
|
||||||
// rejected.
|
|
||||||
//
|
|
||||||
// Requests for policies with any conditional role bindings must specify
|
|
||||||
// version 3. Policies with no conditional role bindings may specify any valid
|
|
||||||
// value or leave the field unset.
|
|
||||||
//
|
|
||||||
// The policy in the response might use the policy version that you specified,
|
|
||||||
// or it might use a lower policy version. For example, if you specify version
|
|
||||||
// 3, but the policy has no conditional role bindings, the response uses
|
|
||||||
// version 1.
|
|
||||||
//
|
|
||||||
// To learn which resources support conditions in their IAM policies, see the
|
|
||||||
// [IAM
|
|
||||||
// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
|
|
||||||
RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GetPolicyOptions) Reset() {
|
|
||||||
*x = GetPolicyOptions{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_v1_options_proto_msgTypes[0]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GetPolicyOptions) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*GetPolicyOptions) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_v1_options_proto_msgTypes[0]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead.
|
|
||||||
func (*GetPolicyOptions) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 {
|
|
||||||
if x != nil {
|
|
||||||
return x.RequestedPolicyVersion
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
var File_google_iam_v1_options_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_google_iam_v1_options_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
|
|
||||||
0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x22, 0x4c, 0x0a, 0x10,
|
|
||||||
0x47, 0x65, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
|
||||||
0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f,
|
|
||||||
0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
|
|
||||||
0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c,
|
|
||||||
0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63,
|
|
||||||
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31,
|
|
||||||
0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
|
||||||
0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
|
|
||||||
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
|
|
||||||
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69,
|
|
||||||
0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
|
|
||||||
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
|
|
||||||
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
|
|
||||||
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
file_google_iam_v1_options_proto_rawDescOnce sync.Once
|
|
||||||
file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc
|
|
||||||
)
|
|
||||||
|
|
||||||
func file_google_iam_v1_options_proto_rawDescGZIP() []byte {
|
|
||||||
file_google_iam_v1_options_proto_rawDescOnce.Do(func() {
|
|
||||||
file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData)
|
|
||||||
})
|
|
||||||
return file_google_iam_v1_options_proto_rawDescData
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
|
||||||
var file_google_iam_v1_options_proto_goTypes = []interface{}{
|
|
||||||
(*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions
|
|
||||||
}
|
|
||||||
var file_google_iam_v1_options_proto_depIdxs = []int32{
|
|
||||||
0, // [0:0] is the sub-list for method output_type
|
|
||||||
0, // [0:0] is the sub-list for method input_type
|
|
||||||
0, // [0:0] is the sub-list for extension type_name
|
|
||||||
0, // [0:0] is the sub-list for extension extendee
|
|
||||||
0, // [0:0] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_google_iam_v1_options_proto_init() }
|
|
||||||
func file_google_iam_v1_options_proto_init() {
|
|
||||||
if File_google_iam_v1_options_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !protoimpl.UnsafeEnabled {
|
|
||||||
file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*GetPolicyOptions); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_google_iam_v1_options_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 1,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 0,
|
|
||||||
},
|
|
||||||
GoTypes: file_google_iam_v1_options_proto_goTypes,
|
|
||||||
DependencyIndexes: file_google_iam_v1_options_proto_depIdxs,
|
|
||||||
MessageInfos: file_google_iam_v1_options_proto_msgTypes,
|
|
||||||
}.Build()
|
|
||||||
File_google_iam_v1_options_proto = out.File
|
|
||||||
file_google_iam_v1_options_proto_rawDesc = nil
|
|
||||||
file_google_iam_v1_options_proto_goTypes = nil
|
|
||||||
file_google_iam_v1_options_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
1169
vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
generated
vendored
1169
vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
generated
vendored
File diff suppressed because it is too large
Load diff
855
vendor/cloud.google.com/go/iam/credentials/apiv1/credentialspb/common.pb.go
generated
vendored
855
vendor/cloud.google.com/go/iam/credentials/apiv1/credentialspb/common.pb.go
generated
vendored
|
|
@ -1,855 +0,0 @@
|
||||||
// Copyright 2020 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// versions:
|
|
||||||
// protoc-gen-go v1.26.0
|
|
||||||
// protoc v3.21.9
|
|
||||||
// source: google/iam/credentials/v1/common.proto
|
|
||||||
|
|
||||||
package credentialspb
|
|
||||||
|
|
||||||
import (
|
|
||||||
reflect "reflect"
|
|
||||||
sync "sync"
|
|
||||||
|
|
||||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
|
||||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Verify that this generated code is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
|
||||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
|
||||||
)
|
|
||||||
|
|
||||||
type GenerateAccessTokenRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// Required. The resource name of the service account for which the credentials
|
|
||||||
// are requested, in the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
|
||||||
// The sequence of service accounts in a delegation chain. Each service
|
|
||||||
// account must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on its next service account in the chain. The last service account in the
|
|
||||||
// chain must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on the service account that is specified in the `name` field of the
|
|
||||||
// request.
|
|
||||||
//
|
|
||||||
// The delegates must have the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Delegates []string `protobuf:"bytes,2,rep,name=delegates,proto3" json:"delegates,omitempty"`
|
|
||||||
// Required. Code to identify the scopes to be included in the OAuth 2.0 access token.
|
|
||||||
// See https://developers.google.com/identity/protocols/googlescopes for more
|
|
||||||
// information.
|
|
||||||
// At least one value required.
|
|
||||||
Scope []string `protobuf:"bytes,4,rep,name=scope,proto3" json:"scope,omitempty"`
|
|
||||||
// The desired lifetime duration of the access token in seconds.
|
|
||||||
// Must be set to a value less than or equal to 3600 (1 hour). If a value is
|
|
||||||
// not specified, the token's lifetime will be set to a default value of one
|
|
||||||
// hour.
|
|
||||||
Lifetime *durationpb.Duration `protobuf:"bytes,7,opt,name=lifetime,proto3" json:"lifetime,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenRequest) Reset() {
|
|
||||||
*x = GenerateAccessTokenRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[0]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*GenerateAccessTokenRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[0]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use GenerateAccessTokenRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*GenerateAccessTokenRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{0}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenRequest) GetName() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenRequest) GetDelegates() []string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Delegates
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenRequest) GetScope() []string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Scope
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenRequest) GetLifetime() *durationpb.Duration {
|
|
||||||
if x != nil {
|
|
||||||
return x.Lifetime
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type GenerateAccessTokenResponse struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// The OAuth 2.0 access token.
|
|
||||||
AccessToken string `protobuf:"bytes,1,opt,name=access_token,json=accessToken,proto3" json:"access_token,omitempty"`
|
|
||||||
// Token expiration time.
|
|
||||||
// The expiration time is always set.
|
|
||||||
ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenResponse) Reset() {
|
|
||||||
*x = GenerateAccessTokenResponse{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[1]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenResponse) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*GenerateAccessTokenResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenResponse) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[1]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use GenerateAccessTokenResponse.ProtoReflect.Descriptor instead.
|
|
||||||
func (*GenerateAccessTokenResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{1}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenResponse) GetAccessToken() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.AccessToken
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateAccessTokenResponse) GetExpireTime() *timestamppb.Timestamp {
|
|
||||||
if x != nil {
|
|
||||||
return x.ExpireTime
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type SignBlobRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// Required. The resource name of the service account for which the credentials
|
|
||||||
// are requested, in the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
|
||||||
// The sequence of service accounts in a delegation chain. Each service
|
|
||||||
// account must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on its next service account in the chain. The last service account in the
|
|
||||||
// chain must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on the service account that is specified in the `name` field of the
|
|
||||||
// request.
|
|
||||||
//
|
|
||||||
// The delegates must have the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Delegates []string `protobuf:"bytes,3,rep,name=delegates,proto3" json:"delegates,omitempty"`
|
|
||||||
// Required. The bytes to sign.
|
|
||||||
Payload []byte `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobRequest) Reset() {
|
|
||||||
*x = SignBlobRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[2]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*SignBlobRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *SignBlobRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[2]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use SignBlobRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*SignBlobRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{2}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobRequest) GetName() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobRequest) GetDelegates() []string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Delegates
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobRequest) GetPayload() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.Payload
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type SignBlobResponse struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// The ID of the key used to sign the blob.
|
|
||||||
KeyId string `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"`
|
|
||||||
// The signed blob.
|
|
||||||
SignedBlob []byte `protobuf:"bytes,4,opt,name=signed_blob,json=signedBlob,proto3" json:"signed_blob,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobResponse) Reset() {
|
|
||||||
*x = SignBlobResponse{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[3]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobResponse) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*SignBlobResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *SignBlobResponse) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[3]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use SignBlobResponse.ProtoReflect.Descriptor instead.
|
|
||||||
func (*SignBlobResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{3}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobResponse) GetKeyId() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.KeyId
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignBlobResponse) GetSignedBlob() []byte {
|
|
||||||
if x != nil {
|
|
||||||
return x.SignedBlob
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type SignJwtRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// Required. The resource name of the service account for which the credentials
|
|
||||||
// are requested, in the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
|
||||||
// The sequence of service accounts in a delegation chain. Each service
|
|
||||||
// account must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on its next service account in the chain. The last service account in the
|
|
||||||
// chain must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on the service account that is specified in the `name` field of the
|
|
||||||
// request.
|
|
||||||
//
|
|
||||||
// The delegates must have the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Delegates []string `protobuf:"bytes,3,rep,name=delegates,proto3" json:"delegates,omitempty"`
|
|
||||||
// Required. The JWT payload to sign: a JSON object that contains a JWT Claims Set.
|
|
||||||
Payload string `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtRequest) Reset() {
|
|
||||||
*x = SignJwtRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[4]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*SignJwtRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *SignJwtRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[4]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use SignJwtRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*SignJwtRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{4}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtRequest) GetName() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtRequest) GetDelegates() []string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Delegates
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtRequest) GetPayload() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Payload
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type SignJwtResponse struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// The ID of the key used to sign the JWT.
|
|
||||||
KeyId string `protobuf:"bytes,1,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"`
|
|
||||||
// The signed JWT.
|
|
||||||
SignedJwt string `protobuf:"bytes,2,opt,name=signed_jwt,json=signedJwt,proto3" json:"signed_jwt,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtResponse) Reset() {
|
|
||||||
*x = SignJwtResponse{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[5]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtResponse) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*SignJwtResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *SignJwtResponse) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[5]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use SignJwtResponse.ProtoReflect.Descriptor instead.
|
|
||||||
func (*SignJwtResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{5}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtResponse) GetKeyId() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.KeyId
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *SignJwtResponse) GetSignedJwt() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.SignedJwt
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
type GenerateIdTokenRequest struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// Required. The resource name of the service account for which the credentials
|
|
||||||
// are requested, in the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
|
||||||
// The sequence of service accounts in a delegation chain. Each service
|
|
||||||
// account must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on its next service account in the chain. The last service account in the
|
|
||||||
// chain must be granted the `roles/iam.serviceAccountTokenCreator` role
|
|
||||||
// on the service account that is specified in the `name` field of the
|
|
||||||
// request.
|
|
||||||
//
|
|
||||||
// The delegates must have the following format:
|
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard
|
|
||||||
// character is required; replacing it with a project ID is invalid.
|
|
||||||
Delegates []string `protobuf:"bytes,2,rep,name=delegates,proto3" json:"delegates,omitempty"`
|
|
||||||
// Required. The audience for the token, such as the API or account that this token
|
|
||||||
// grants access to.
|
|
||||||
Audience string `protobuf:"bytes,3,opt,name=audience,proto3" json:"audience,omitempty"`
|
|
||||||
// Include the service account email in the token. If set to `true`, the
|
|
||||||
// token will contain `email` and `email_verified` claims.
|
|
||||||
IncludeEmail bool `protobuf:"varint,4,opt,name=include_email,json=includeEmail,proto3" json:"include_email,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenRequest) Reset() {
|
|
||||||
*x = GenerateIdTokenRequest{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[6]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenRequest) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*GenerateIdTokenRequest) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenRequest) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[6]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use GenerateIdTokenRequest.ProtoReflect.Descriptor instead.
|
|
||||||
func (*GenerateIdTokenRequest) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{6}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenRequest) GetName() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenRequest) GetDelegates() []string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Delegates
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenRequest) GetAudience() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Audience
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenRequest) GetIncludeEmail() bool {
|
|
||||||
if x != nil {
|
|
||||||
return x.IncludeEmail
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
type GenerateIdTokenResponse struct {
|
|
||||||
state protoimpl.MessageState
|
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
// The OpenId Connect ID token.
|
|
||||||
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenResponse) Reset() {
|
|
||||||
*x = GenerateIdTokenResponse{}
|
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[7]
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenResponse) String() string {
|
|
||||||
return protoimpl.X.MessageStringOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*GenerateIdTokenResponse) ProtoMessage() {}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenResponse) ProtoReflect() protoreflect.Message {
|
|
||||||
mi := &file_google_iam_credentials_v1_common_proto_msgTypes[7]
|
|
||||||
if protoimpl.UnsafeEnabled && x != nil {
|
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
|
||||||
if ms.LoadMessageInfo() == nil {
|
|
||||||
ms.StoreMessageInfo(mi)
|
|
||||||
}
|
|
||||||
return ms
|
|
||||||
}
|
|
||||||
return mi.MessageOf(x)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deprecated: Use GenerateIdTokenResponse.ProtoReflect.Descriptor instead.
|
|
||||||
func (*GenerateIdTokenResponse) Descriptor() ([]byte, []int) {
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescGZIP(), []int{7}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (x *GenerateIdTokenResponse) GetToken() string {
|
|
||||||
if x != nil {
|
|
||||||
return x.Token
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
var File_google_iam_credentials_v1_common_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_google_iam_credentials_v1_common_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x63, 0x72, 0x65,
|
|
||||||
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d,
|
|
||||||
0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
|
||||||
0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73,
|
|
||||||
0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
|
||||||
0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70,
|
|
||||||
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
|
|
||||||
0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
|
|
||||||
0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
|
||||||
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
|
|
||||||
0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
|
|
||||||
0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
|
||||||
0x22, 0xcb, 0x01, 0x0a, 0x1a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63,
|
|
||||||
0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
|
||||||
0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0,
|
|
||||||
0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x69, 0x61, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
|
||||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
|
||||||
0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c,
|
|
||||||
0x0a, 0x09, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
|
|
||||||
0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x05,
|
|
||||||
0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
|
|
||||||
0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6c, 0x69, 0x66, 0x65, 0x74,
|
|
||||||
0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
|
||||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
|
|
||||||
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x7d,
|
|
||||||
0x0a, 0x1b, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
|
|
||||||
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a,
|
|
||||||
0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20,
|
|
||||||
0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
|
|
||||||
0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
|
|
||||||
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
|
|
||||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
|
||||||
0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x8d, 0x01,
|
|
||||||
0x0a, 0x0f, 0x53, 0x69, 0x67, 0x6e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
|
||||||
0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
|
|
||||||
0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x69, 0x61, 0x6d, 0x2e, 0x67, 0x6f, 0x6f,
|
|
||||||
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76,
|
|
||||||
0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
|
|
||||||
0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20,
|
|
||||||
0x03, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1d,
|
|
||||||
0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42,
|
|
||||||
0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4a, 0x0a,
|
|
||||||
0x10, 0x53, 0x69, 0x67, 0x6e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
|
||||||
0x65, 0x12, 0x15, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
|
|
||||||
0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x69, 0x67, 0x6e,
|
|
||||||
0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73,
|
|
||||||
0x69, 0x67, 0x6e, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x22, 0x8c, 0x01, 0x0a, 0x0e, 0x53, 0x69,
|
|
||||||
0x67, 0x6e, 0x4a, 0x77, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04,
|
|
||||||
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa,
|
|
||||||
0x41, 0x23, 0x0a, 0x21, 0x69, 0x61, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
|
|
||||||
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63,
|
|
||||||
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64,
|
|
||||||
0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
|
|
||||||
0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x07, 0x70, 0x61, 0x79,
|
|
||||||
0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
|
|
||||||
0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x47, 0x0a, 0x0f, 0x53, 0x69, 0x67, 0x6e,
|
|
||||||
0x4a, 0x77, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x6b,
|
|
||||||
0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79,
|
|
||||||
0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6a, 0x77, 0x74,
|
|
||||||
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4a, 0x77,
|
|
||||||
0x74, 0x22, 0xbb, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64,
|
|
||||||
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04,
|
|
||||||
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa,
|
|
||||||
0x41, 0x23, 0x0a, 0x21, 0x69, 0x61, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
|
|
||||||
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63,
|
|
||||||
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64,
|
|
||||||
0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09,
|
|
||||||
0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x08, 0x61, 0x75, 0x64,
|
|
||||||
0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
|
|
||||||
0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e,
|
|
||||||
0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28,
|
|
||||||
0x08, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22,
|
|
||||||
0x2f, 0x0a, 0x17, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x54, 0x6f, 0x6b,
|
|
||||||
0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f,
|
|
||||||
0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
|
|
||||||
0x42, 0xab, 0x02, 0x0a, 0x23, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
|
||||||
0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
|
|
||||||
0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x42, 0x19, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65,
|
|
||||||
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72,
|
|
||||||
0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f,
|
|
||||||
0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74,
|
|
||||||
0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d,
|
|
||||||
0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x76, 0x31, 0x3b,
|
|
||||||
0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0xf8, 0x01, 0x01, 0xaa, 0x02,
|
|
||||||
0x1f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61,
|
|
||||||
0x6d, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x56, 0x31,
|
|
||||||
0xca, 0x02, 0x1f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c,
|
|
||||||
0x49, 0x61, 0x6d, 0x5c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x5c,
|
|
||||||
0x56, 0x31, 0xea, 0x41, 0x59, 0x0a, 0x21, 0x69, 0x61, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
|
||||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
|
|
||||||
0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
|
|
||||||
0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72,
|
|
||||||
0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x65,
|
|
||||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x7d, 0x62, 0x06,
|
|
||||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
file_google_iam_credentials_v1_common_proto_rawDescOnce sync.Once
|
|
||||||
file_google_iam_credentials_v1_common_proto_rawDescData = file_google_iam_credentials_v1_common_proto_rawDesc
|
|
||||||
)
|
|
||||||
|
|
||||||
func file_google_iam_credentials_v1_common_proto_rawDescGZIP() []byte {
|
|
||||||
file_google_iam_credentials_v1_common_proto_rawDescOnce.Do(func() {
|
|
||||||
file_google_iam_credentials_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_credentials_v1_common_proto_rawDescData)
|
|
||||||
})
|
|
||||||
return file_google_iam_credentials_v1_common_proto_rawDescData
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_google_iam_credentials_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
|
|
||||||
var file_google_iam_credentials_v1_common_proto_goTypes = []interface{}{
|
|
||||||
(*GenerateAccessTokenRequest)(nil), // 0: google.iam.credentials.v1.GenerateAccessTokenRequest
|
|
||||||
(*GenerateAccessTokenResponse)(nil), // 1: google.iam.credentials.v1.GenerateAccessTokenResponse
|
|
||||||
(*SignBlobRequest)(nil), // 2: google.iam.credentials.v1.SignBlobRequest
|
|
||||||
(*SignBlobResponse)(nil), // 3: google.iam.credentials.v1.SignBlobResponse
|
|
||||||
(*SignJwtRequest)(nil), // 4: google.iam.credentials.v1.SignJwtRequest
|
|
||||||
(*SignJwtResponse)(nil), // 5: google.iam.credentials.v1.SignJwtResponse
|
|
||||||
(*GenerateIdTokenRequest)(nil), // 6: google.iam.credentials.v1.GenerateIdTokenRequest
|
|
||||||
(*GenerateIdTokenResponse)(nil), // 7: google.iam.credentials.v1.GenerateIdTokenResponse
|
|
||||||
(*durationpb.Duration)(nil), // 8: google.protobuf.Duration
|
|
||||||
(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
|
|
||||||
}
|
|
||||||
var file_google_iam_credentials_v1_common_proto_depIdxs = []int32{
|
|
||||||
8, // 0: google.iam.credentials.v1.GenerateAccessTokenRequest.lifetime:type_name -> google.protobuf.Duration
|
|
||||||
9, // 1: google.iam.credentials.v1.GenerateAccessTokenResponse.expire_time:type_name -> google.protobuf.Timestamp
|
|
||||||
2, // [2:2] is the sub-list for method output_type
|
|
||||||
2, // [2:2] is the sub-list for method input_type
|
|
||||||
2, // [2:2] is the sub-list for extension type_name
|
|
||||||
2, // [2:2] is the sub-list for extension extendee
|
|
||||||
0, // [0:2] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_google_iam_credentials_v1_common_proto_init() }
|
|
||||||
func file_google_iam_credentials_v1_common_proto_init() {
|
|
||||||
if File_google_iam_credentials_v1_common_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if !protoimpl.UnsafeEnabled {
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*GenerateAccessTokenRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*GenerateAccessTokenResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*SignBlobRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*SignBlobResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*SignJwtRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*SignJwtResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*GenerateIdTokenRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
|
||||||
switch v := v.(*GenerateIdTokenResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_google_iam_credentials_v1_common_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 8,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 0,
|
|
||||||
},
|
|
||||||
GoTypes: file_google_iam_credentials_v1_common_proto_goTypes,
|
|
||||||
DependencyIndexes: file_google_iam_credentials_v1_common_proto_depIdxs,
|
|
||||||
MessageInfos: file_google_iam_credentials_v1_common_proto_msgTypes,
|
|
||||||
}.Build()
|
|
||||||
File_google_iam_credentials_v1_common_proto = out.File
|
|
||||||
file_google_iam_credentials_v1_common_proto_rawDesc = nil
|
|
||||||
file_google_iam_credentials_v1_common_proto_goTypes = nil
|
|
||||||
file_google_iam_credentials_v1_common_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
375
vendor/cloud.google.com/go/iam/credentials/apiv1/credentialspb/iamcredentials.pb.go
generated
vendored
375
vendor/cloud.google.com/go/iam/credentials/apiv1/credentialspb/iamcredentials.pb.go
generated
vendored
|
|
@ -1,375 +0,0 @@
|
||||||
// Copyright 2020 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
|
||||||
// versions:
|
|
||||||
// protoc-gen-go v1.26.0
|
|
||||||
// protoc v3.21.9
|
|
||||||
// source: google/iam/credentials/v1/iamcredentials.proto
|
|
||||||
|
|
||||||
package credentialspb
|
|
||||||
|
|
||||||
import (
|
|
||||||
context "context"
|
|
||||||
reflect "reflect"
|
|
||||||
|
|
||||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
|
||||||
grpc "google.golang.org/grpc"
|
|
||||||
codes "google.golang.org/grpc/codes"
|
|
||||||
status "google.golang.org/grpc/status"
|
|
||||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Verify that this generated code is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
|
||||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
|
||||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
|
||||||
)
|
|
||||||
|
|
||||||
var File_google_iam_credentials_v1_iamcredentials_proto protoreflect.FileDescriptor
|
|
||||||
|
|
||||||
var file_google_iam_credentials_v1_iamcredentials_proto_rawDesc = []byte{
|
|
||||||
0x0a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x63, 0x72, 0x65,
|
|
||||||
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x63,
|
|
||||||
0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
|
||||||
0x12, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65,
|
|
||||||
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
|
|
||||||
0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
|
|
||||||
0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
|
||||||
0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f,
|
|
||||||
0x74, 0x6f, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x63,
|
|
||||||
0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f,
|
|
||||||
0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xad, 0x07, 0x0a, 0x0e, 0x49,
|
|
||||||
0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0xec, 0x01,
|
|
||||||
0x0a, 0x13, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
|
|
||||||
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69,
|
|
||||||
0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76,
|
|
||||||
0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
|
|
||||||
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
|
|
||||||
0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
|
|
||||||
0x65, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70,
|
|
||||||
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x66, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76,
|
|
||||||
0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
|
|
||||||
0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
|
|
||||||
0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, 0x63,
|
|
||||||
0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1d, 0x6e,
|
|
||||||
0x61, 0x6d, 0x65, 0x2c, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x2c, 0x73, 0x63,
|
|
||||||
0x6f, 0x70, 0x65, 0x2c, 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x12, 0xe4, 0x01, 0x0a,
|
|
||||||
0x0f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
|
|
||||||
0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72,
|
|
||||||
0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e,
|
|
||||||
0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75,
|
|
||||||
0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d,
|
|
||||||
0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x2e,
|
|
||||||
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x49, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
|
|
||||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22,
|
|
||||||
0x37, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
|
|
||||||
0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63,
|
|
||||||
0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
|
|
||||||
0x65, 0x49, 0x64, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x25, 0x6e, 0x61,
|
|
||||||
0x6d, 0x65, 0x2c, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x2c, 0x61, 0x75, 0x64,
|
|
||||||
0x69, 0x65, 0x6e, 0x63, 0x65, 0x2c, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x65, 0x6d,
|
|
||||||
0x61, 0x69, 0x6c, 0x12, 0xb9, 0x01, 0x0a, 0x08, 0x53, 0x69, 0x67, 0x6e, 0x42, 0x6c, 0x6f, 0x62,
|
|
||||||
0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72,
|
|
||||||
0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67,
|
|
||||||
0x6e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67,
|
|
||||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e,
|
|
||||||
0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x42, 0x6c, 0x6f,
|
|
||||||
0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x54, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
|
||||||
0x35, 0x22, 0x30, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f,
|
|
||||||
0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
|
|
||||||
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x69, 0x67, 0x6e, 0x42,
|
|
||||||
0x6c, 0x6f, 0x62, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x65,
|
|
||||||
0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x2c, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12,
|
|
||||||
0xb5, 0x01, 0x0a, 0x07, 0x53, 0x69, 0x67, 0x6e, 0x4a, 0x77, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f,
|
|
||||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74,
|
|
||||||
0x69, 0x61, 0x6c, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4a, 0x77, 0x74, 0x52,
|
|
||||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
|
|
||||||
0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e,
|
|
||||||
0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x4a, 0x77, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
|
||||||
0x73, 0x65, 0x22, 0x53, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x22, 0x2f, 0x2f, 0x76, 0x31, 0x2f,
|
|
||||||
0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
|
|
||||||
0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73,
|
|
||||||
0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x69, 0x67, 0x6e, 0x4a, 0x77, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41,
|
|
||||||
0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x64, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x73, 0x2c,
|
|
||||||
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x51, 0xca, 0x41, 0x1d, 0x69, 0x61, 0x6d, 0x63,
|
|
||||||
0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
|
||||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x2e, 0x68, 0x74, 0x74, 0x70,
|
|
||||||
0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
|
|
||||||
0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75,
|
|
||||||
0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0xc9, 0x01, 0x0a, 0x23, 0x63,
|
|
||||||
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
|
|
||||||
0x69, 0x61, 0x6d, 0x2e, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2e,
|
|
||||||
0x76, 0x31, 0x42, 0x13, 0x49, 0x41, 0x4d, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
|
|
||||||
0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x44, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
|
||||||
0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
|
|
||||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
|
|
||||||
0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73,
|
|
||||||
0x2f, 0x76, 0x31, 0x3b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0xf8,
|
|
||||||
0x01, 0x01, 0xaa, 0x02, 0x1f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75,
|
|
||||||
0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c,
|
|
||||||
0x73, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x1f, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c,
|
|
||||||
0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
|
|
||||||
0x61, 0x6c, 0x73, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
|
||||||
}
|
|
||||||
|
|
||||||
var file_google_iam_credentials_v1_iamcredentials_proto_goTypes = []interface{}{
|
|
||||||
(*GenerateAccessTokenRequest)(nil), // 0: google.iam.credentials.v1.GenerateAccessTokenRequest
|
|
||||||
(*GenerateIdTokenRequest)(nil), // 1: google.iam.credentials.v1.GenerateIdTokenRequest
|
|
||||||
(*SignBlobRequest)(nil), // 2: google.iam.credentials.v1.SignBlobRequest
|
|
||||||
(*SignJwtRequest)(nil), // 3: google.iam.credentials.v1.SignJwtRequest
|
|
||||||
(*GenerateAccessTokenResponse)(nil), // 4: google.iam.credentials.v1.GenerateAccessTokenResponse
|
|
||||||
(*GenerateIdTokenResponse)(nil), // 5: google.iam.credentials.v1.GenerateIdTokenResponse
|
|
||||||
(*SignBlobResponse)(nil), // 6: google.iam.credentials.v1.SignBlobResponse
|
|
||||||
(*SignJwtResponse)(nil), // 7: google.iam.credentials.v1.SignJwtResponse
|
|
||||||
}
|
|
||||||
var file_google_iam_credentials_v1_iamcredentials_proto_depIdxs = []int32{
|
|
||||||
0, // 0: google.iam.credentials.v1.IAMCredentials.GenerateAccessToken:input_type -> google.iam.credentials.v1.GenerateAccessTokenRequest
|
|
||||||
1, // 1: google.iam.credentials.v1.IAMCredentials.GenerateIdToken:input_type -> google.iam.credentials.v1.GenerateIdTokenRequest
|
|
||||||
2, // 2: google.iam.credentials.v1.IAMCredentials.SignBlob:input_type -> google.iam.credentials.v1.SignBlobRequest
|
|
||||||
3, // 3: google.iam.credentials.v1.IAMCredentials.SignJwt:input_type -> google.iam.credentials.v1.SignJwtRequest
|
|
||||||
4, // 4: google.iam.credentials.v1.IAMCredentials.GenerateAccessToken:output_type -> google.iam.credentials.v1.GenerateAccessTokenResponse
|
|
||||||
5, // 5: google.iam.credentials.v1.IAMCredentials.GenerateIdToken:output_type -> google.iam.credentials.v1.GenerateIdTokenResponse
|
|
||||||
6, // 6: google.iam.credentials.v1.IAMCredentials.SignBlob:output_type -> google.iam.credentials.v1.SignBlobResponse
|
|
||||||
7, // 7: google.iam.credentials.v1.IAMCredentials.SignJwt:output_type -> google.iam.credentials.v1.SignJwtResponse
|
|
||||||
4, // [4:8] is the sub-list for method output_type
|
|
||||||
0, // [0:4] is the sub-list for method input_type
|
|
||||||
0, // [0:0] is the sub-list for extension type_name
|
|
||||||
0, // [0:0] is the sub-list for extension extendee
|
|
||||||
0, // [0:0] is the sub-list for field type_name
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() { file_google_iam_credentials_v1_iamcredentials_proto_init() }
|
|
||||||
func file_google_iam_credentials_v1_iamcredentials_proto_init() {
|
|
||||||
if File_google_iam_credentials_v1_iamcredentials_proto != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
file_google_iam_credentials_v1_common_proto_init()
|
|
||||||
type x struct{}
|
|
||||||
out := protoimpl.TypeBuilder{
|
|
||||||
File: protoimpl.DescBuilder{
|
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
|
||||||
RawDescriptor: file_google_iam_credentials_v1_iamcredentials_proto_rawDesc,
|
|
||||||
NumEnums: 0,
|
|
||||||
NumMessages: 0,
|
|
||||||
NumExtensions: 0,
|
|
||||||
NumServices: 1,
|
|
||||||
},
|
|
||||||
GoTypes: file_google_iam_credentials_v1_iamcredentials_proto_goTypes,
|
|
||||||
DependencyIndexes: file_google_iam_credentials_v1_iamcredentials_proto_depIdxs,
|
|
||||||
}.Build()
|
|
||||||
File_google_iam_credentials_v1_iamcredentials_proto = out.File
|
|
||||||
file_google_iam_credentials_v1_iamcredentials_proto_rawDesc = nil
|
|
||||||
file_google_iam_credentials_v1_iamcredentials_proto_goTypes = nil
|
|
||||||
file_google_iam_credentials_v1_iamcredentials_proto_depIdxs = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
|
||||||
var _ context.Context
|
|
||||||
var _ grpc.ClientConnInterface
|
|
||||||
|
|
||||||
// This is a compile-time assertion to ensure that this generated file
|
|
||||||
// is compatible with the grpc package it is being compiled against.
|
|
||||||
const _ = grpc.SupportPackageIsVersion6
|
|
||||||
|
|
||||||
// IAMCredentialsClient is the client API for IAMCredentials service.
|
|
||||||
//
|
|
||||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
|
||||||
type IAMCredentialsClient interface {
|
|
||||||
// Generates an OAuth 2.0 access token for a service account.
|
|
||||||
GenerateAccessToken(ctx context.Context, in *GenerateAccessTokenRequest, opts ...grpc.CallOption) (*GenerateAccessTokenResponse, error)
|
|
||||||
// Generates an OpenID Connect ID token for a service account.
|
|
||||||
GenerateIdToken(ctx context.Context, in *GenerateIdTokenRequest, opts ...grpc.CallOption) (*GenerateIdTokenResponse, error)
|
|
||||||
// Signs a blob using a service account's system-managed private key.
|
|
||||||
SignBlob(ctx context.Context, in *SignBlobRequest, opts ...grpc.CallOption) (*SignBlobResponse, error)
|
|
||||||
// Signs a JWT using a service account's system-managed private key.
|
|
||||||
SignJwt(ctx context.Context, in *SignJwtRequest, opts ...grpc.CallOption) (*SignJwtResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type iAMCredentialsClient struct {
|
|
||||||
cc grpc.ClientConnInterface
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewIAMCredentialsClient(cc grpc.ClientConnInterface) IAMCredentialsClient {
|
|
||||||
return &iAMCredentialsClient{cc}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iAMCredentialsClient) GenerateAccessToken(ctx context.Context, in *GenerateAccessTokenRequest, opts ...grpc.CallOption) (*GenerateAccessTokenResponse, error) {
|
|
||||||
out := new(GenerateAccessTokenResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/google.iam.credentials.v1.IAMCredentials/GenerateAccessToken", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iAMCredentialsClient) GenerateIdToken(ctx context.Context, in *GenerateIdTokenRequest, opts ...grpc.CallOption) (*GenerateIdTokenResponse, error) {
|
|
||||||
out := new(GenerateIdTokenResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/google.iam.credentials.v1.IAMCredentials/GenerateIdToken", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iAMCredentialsClient) SignBlob(ctx context.Context, in *SignBlobRequest, opts ...grpc.CallOption) (*SignBlobResponse, error) {
|
|
||||||
out := new(SignBlobResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/google.iam.credentials.v1.IAMCredentials/SignBlob", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iAMCredentialsClient) SignJwt(ctx context.Context, in *SignJwtRequest, opts ...grpc.CallOption) (*SignJwtResponse, error) {
|
|
||||||
out := new(SignJwtResponse)
|
|
||||||
err := c.cc.Invoke(ctx, "/google.iam.credentials.v1.IAMCredentials/SignJwt", in, out, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return out, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IAMCredentialsServer is the server API for IAMCredentials service.
|
|
||||||
type IAMCredentialsServer interface {
|
|
||||||
// Generates an OAuth 2.0 access token for a service account.
|
|
||||||
GenerateAccessToken(context.Context, *GenerateAccessTokenRequest) (*GenerateAccessTokenResponse, error)
|
|
||||||
// Generates an OpenID Connect ID token for a service account.
|
|
||||||
GenerateIdToken(context.Context, *GenerateIdTokenRequest) (*GenerateIdTokenResponse, error)
|
|
||||||
// Signs a blob using a service account's system-managed private key.
|
|
||||||
SignBlob(context.Context, *SignBlobRequest) (*SignBlobResponse, error)
|
|
||||||
// Signs a JWT using a service account's system-managed private key.
|
|
||||||
SignJwt(context.Context, *SignJwtRequest) (*SignJwtResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnimplementedIAMCredentialsServer can be embedded to have forward compatible implementations.
|
|
||||||
type UnimplementedIAMCredentialsServer struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*UnimplementedIAMCredentialsServer) GenerateAccessToken(context.Context, *GenerateAccessTokenRequest) (*GenerateAccessTokenResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method GenerateAccessToken not implemented")
|
|
||||||
}
|
|
||||||
func (*UnimplementedIAMCredentialsServer) GenerateIdToken(context.Context, *GenerateIdTokenRequest) (*GenerateIdTokenResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method GenerateIdToken not implemented")
|
|
||||||
}
|
|
||||||
func (*UnimplementedIAMCredentialsServer) SignBlob(context.Context, *SignBlobRequest) (*SignBlobResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method SignBlob not implemented")
|
|
||||||
}
|
|
||||||
func (*UnimplementedIAMCredentialsServer) SignJwt(context.Context, *SignJwtRequest) (*SignJwtResponse, error) {
|
|
||||||
return nil, status.Errorf(codes.Unimplemented, "method SignJwt not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func RegisterIAMCredentialsServer(s *grpc.Server, srv IAMCredentialsServer) {
|
|
||||||
s.RegisterService(&_IAMCredentials_serviceDesc, srv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _IAMCredentials_GenerateAccessToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(GenerateAccessTokenRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(IAMCredentialsServer).GenerateAccessToken(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/google.iam.credentials.v1.IAMCredentials/GenerateAccessToken",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(IAMCredentialsServer).GenerateAccessToken(ctx, req.(*GenerateAccessTokenRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _IAMCredentials_GenerateIdToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(GenerateIdTokenRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(IAMCredentialsServer).GenerateIdToken(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/google.iam.credentials.v1.IAMCredentials/GenerateIdToken",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(IAMCredentialsServer).GenerateIdToken(ctx, req.(*GenerateIdTokenRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _IAMCredentials_SignBlob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(SignBlobRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(IAMCredentialsServer).SignBlob(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/google.iam.credentials.v1.IAMCredentials/SignBlob",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(IAMCredentialsServer).SignBlob(ctx, req.(*SignBlobRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _IAMCredentials_SignJwt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
|
||||||
in := new(SignJwtRequest)
|
|
||||||
if err := dec(in); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if interceptor == nil {
|
|
||||||
return srv.(IAMCredentialsServer).SignJwt(ctx, in)
|
|
||||||
}
|
|
||||||
info := &grpc.UnaryServerInfo{
|
|
||||||
Server: srv,
|
|
||||||
FullMethod: "/google.iam.credentials.v1.IAMCredentials/SignJwt",
|
|
||||||
}
|
|
||||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
|
||||||
return srv.(IAMCredentialsServer).SignJwt(ctx, req.(*SignJwtRequest))
|
|
||||||
}
|
|
||||||
return interceptor(ctx, in, info, handler)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _IAMCredentials_serviceDesc = grpc.ServiceDesc{
|
|
||||||
ServiceName: "google.iam.credentials.v1.IAMCredentials",
|
|
||||||
HandlerType: (*IAMCredentialsServer)(nil),
|
|
||||||
Methods: []grpc.MethodDesc{
|
|
||||||
{
|
|
||||||
MethodName: "GenerateAccessToken",
|
|
||||||
Handler: _IAMCredentials_GenerateAccessToken_Handler,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
MethodName: "GenerateIdToken",
|
|
||||||
Handler: _IAMCredentials_GenerateIdToken_Handler,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
MethodName: "SignBlob",
|
|
||||||
Handler: _IAMCredentials_SignBlob_Handler,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
MethodName: "SignJwt",
|
|
||||||
Handler: _IAMCredentials_SignJwt_Handler,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Streams: []grpc.StreamDesc{},
|
|
||||||
Metadata: "google/iam/credentials/v1/iamcredentials.proto",
|
|
||||||
}
|
|
||||||
171
vendor/cloud.google.com/go/iam/credentials/apiv1/doc.go
generated
vendored
171
vendor/cloud.google.com/go/iam/credentials/apiv1/doc.go
generated
vendored
|
|
@ -1,171 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// https://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
|
||||||
|
|
||||||
// Package credentials is an auto-generated package for the
|
|
||||||
// IAM Service Account Credentials API.
|
|
||||||
//
|
|
||||||
// Creates short-lived, limited-privilege credentials for IAM service
|
|
||||||
// accounts.
|
|
||||||
//
|
|
||||||
// # Example usage
|
|
||||||
//
|
|
||||||
// To get started with this package, create a client.
|
|
||||||
//
|
|
||||||
// ctx := context.Background()
|
|
||||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
|
||||||
// // It will require modifications to work:
|
|
||||||
// // - It may require correct/in-range values for request initialization.
|
|
||||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
|
||||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
|
||||||
// c, err := credentials.NewIamCredentialsClient(ctx)
|
|
||||||
// if err != nil {
|
|
||||||
// // TODO: Handle error.
|
|
||||||
// }
|
|
||||||
// defer c.Close()
|
|
||||||
//
|
|
||||||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
|
||||||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
|
||||||
// The returned client must be Closed when it is done being used.
|
|
||||||
//
|
|
||||||
// # Using the Client
|
|
||||||
//
|
|
||||||
// The following is an example of making an API call with the newly created client.
|
|
||||||
//
|
|
||||||
// ctx := context.Background()
|
|
||||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
|
||||||
// // It will require modifications to work:
|
|
||||||
// // - It may require correct/in-range values for request initialization.
|
|
||||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
|
||||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
|
||||||
// c, err := credentials.NewIamCredentialsClient(ctx)
|
|
||||||
// if err != nil {
|
|
||||||
// // TODO: Handle error.
|
|
||||||
// }
|
|
||||||
// defer c.Close()
|
|
||||||
//
|
|
||||||
// req := &credentialspb.GenerateAccessTokenRequest{
|
|
||||||
// // TODO: Fill request struct fields.
|
|
||||||
// // See https://pkg.go.dev/cloud.google.com/go/iam/credentials/apiv1/credentialspb#GenerateAccessTokenRequest.
|
|
||||||
// }
|
|
||||||
// resp, err := c.GenerateAccessToken(ctx, req)
|
|
||||||
// if err != nil {
|
|
||||||
// // TODO: Handle error.
|
|
||||||
// }
|
|
||||||
// // TODO: Use resp.
|
|
||||||
// _ = resp
|
|
||||||
//
|
|
||||||
// # Use of Context
|
|
||||||
//
|
|
||||||
// The ctx passed to NewIamCredentialsClient is used for authentication requests and
|
|
||||||
// for creating the underlying connection, but is not used for subsequent calls.
|
|
||||||
// Individual methods on the client use the ctx given to them.
|
|
||||||
//
|
|
||||||
// To close the open connection, use the Close() method.
|
|
||||||
//
|
|
||||||
// For information about setting deadlines, reusing contexts, and more
|
|
||||||
// please visit https://pkg.go.dev/cloud.google.com/go.
|
|
||||||
package credentials // import "cloud.google.com/go/iam/credentials/apiv1"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"google.golang.org/api/option"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
// For more information on implementing a client constructor hook, see
|
|
||||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
|
||||||
type clientHookParams struct{}
|
|
||||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
|
||||||
|
|
||||||
var versionClient string
|
|
||||||
|
|
||||||
func getVersionClient() string {
|
|
||||||
if versionClient == "" {
|
|
||||||
return "UNKNOWN"
|
|
||||||
}
|
|
||||||
return versionClient
|
|
||||||
}
|
|
||||||
|
|
||||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
|
||||||
out, _ := metadata.FromOutgoingContext(ctx)
|
|
||||||
out = out.Copy()
|
|
||||||
for _, md := range mds {
|
|
||||||
for k, v := range md {
|
|
||||||
out[k] = append(out[k], v...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return metadata.NewOutgoingContext(ctx, out)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkDisableDeadlines() (bool, error) {
|
|
||||||
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
|
|
||||||
if !ok {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := strconv.ParseBool(raw)
|
|
||||||
return b, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
|
||||||
func DefaultAuthScopes() []string {
|
|
||||||
return []string{
|
|
||||||
"https://www.googleapis.com/auth/cloud-platform",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// versionGo returns the Go runtime version. The returned string
|
|
||||||
// has no whitespace, suitable for reporting in header.
|
|
||||||
func versionGo() string {
|
|
||||||
const develPrefix = "devel +"
|
|
||||||
|
|
||||||
s := runtime.Version()
|
|
||||||
if strings.HasPrefix(s, develPrefix) {
|
|
||||||
s = s[len(develPrefix):]
|
|
||||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
|
||||||
s = s[:p]
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
notSemverRune := func(r rune) bool {
|
|
||||||
return !strings.ContainsRune("0123456789.", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(s, "go1") {
|
|
||||||
s = s[2:]
|
|
||||||
var prerelease string
|
|
||||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
|
||||||
s, prerelease = s[:p], s[p:]
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(s, ".") {
|
|
||||||
s += "0"
|
|
||||||
} else if strings.Count(s, ".") < 2 {
|
|
||||||
s += ".0"
|
|
||||||
}
|
|
||||||
if prerelease != "" {
|
|
||||||
s += "-" + prerelease
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return "UNKNOWN"
|
|
||||||
}
|
|
||||||
38
vendor/cloud.google.com/go/iam/credentials/apiv1/gapic_metadata.json
generated
vendored
38
vendor/cloud.google.com/go/iam/credentials/apiv1/gapic_metadata.json
generated
vendored
|
|
@ -1,38 +0,0 @@
|
||||||
{
|
|
||||||
"schema": "1.0",
|
|
||||||
"comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
|
|
||||||
"language": "go",
|
|
||||||
"protoPackage": "google.iam.credentials.v1",
|
|
||||||
"libraryPackage": "cloud.google.com/go/iam/credentials/apiv1",
|
|
||||||
"services": {
|
|
||||||
"IAMCredentials": {
|
|
||||||
"clients": {
|
|
||||||
"grpc": {
|
|
||||||
"libraryClient": "IamCredentialsClient",
|
|
||||||
"rpcs": {
|
|
||||||
"GenerateAccessToken": {
|
|
||||||
"methods": [
|
|
||||||
"GenerateAccessToken"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"GenerateIdToken": {
|
|
||||||
"methods": [
|
|
||||||
"GenerateIdToken"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"SignBlob": {
|
|
||||||
"methods": [
|
|
||||||
"SignBlob"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"SignJwt": {
|
|
||||||
"methods": [
|
|
||||||
"SignJwt"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
360
vendor/cloud.google.com/go/iam/credentials/apiv1/iam_credentials_client.go
generated
vendored
360
vendor/cloud.google.com/go/iam/credentials/apiv1/iam_credentials_client.go
generated
vendored
|
|
@ -1,360 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// https://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
|
||||||
|
|
||||||
package credentials
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
credentialspb "cloud.google.com/go/iam/credentials/apiv1/credentialspb"
|
|
||||||
gax "github.com/googleapis/gax-go/v2"
|
|
||||||
"google.golang.org/api/option"
|
|
||||||
"google.golang.org/api/option/internaloption"
|
|
||||||
gtransport "google.golang.org/api/transport/grpc"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
var newIamCredentialsClientHook clientHook
|
|
||||||
|
|
||||||
// IamCredentialsCallOptions contains the retry settings for each method of IamCredentialsClient.
|
|
||||||
type IamCredentialsCallOptions struct {
|
|
||||||
GenerateAccessToken []gax.CallOption
|
|
||||||
GenerateIdToken []gax.CallOption
|
|
||||||
SignBlob []gax.CallOption
|
|
||||||
SignJwt []gax.CallOption
|
|
||||||
}
|
|
||||||
|
|
||||||
func defaultIamCredentialsGRPCClientOptions() []option.ClientOption {
|
|
||||||
return []option.ClientOption{
|
|
||||||
internaloption.WithDefaultEndpoint("iamcredentials.googleapis.com:443"),
|
|
||||||
internaloption.WithDefaultMTLSEndpoint("iamcredentials.mtls.googleapis.com:443"),
|
|
||||||
internaloption.WithDefaultAudience("https://iamcredentials.googleapis.com/"),
|
|
||||||
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
|
|
||||||
internaloption.EnableJwtWithScope(),
|
|
||||||
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
|
|
||||||
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func defaultIamCredentialsCallOptions() *IamCredentialsCallOptions {
|
|
||||||
return &IamCredentialsCallOptions{
|
|
||||||
GenerateAccessToken: []gax.CallOption{
|
|
||||||
gax.WithRetry(func() gax.Retryer {
|
|
||||||
return gax.OnCodes([]codes.Code{
|
|
||||||
codes.Unavailable,
|
|
||||||
codes.DeadlineExceeded,
|
|
||||||
}, gax.Backoff{
|
|
||||||
Initial: 100 * time.Millisecond,
|
|
||||||
Max: 60000 * time.Millisecond,
|
|
||||||
Multiplier: 1.30,
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
GenerateIdToken: []gax.CallOption{
|
|
||||||
gax.WithRetry(func() gax.Retryer {
|
|
||||||
return gax.OnCodes([]codes.Code{
|
|
||||||
codes.Unavailable,
|
|
||||||
codes.DeadlineExceeded,
|
|
||||||
}, gax.Backoff{
|
|
||||||
Initial: 100 * time.Millisecond,
|
|
||||||
Max: 60000 * time.Millisecond,
|
|
||||||
Multiplier: 1.30,
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
SignBlob: []gax.CallOption{
|
|
||||||
gax.WithRetry(func() gax.Retryer {
|
|
||||||
return gax.OnCodes([]codes.Code{
|
|
||||||
codes.Unavailable,
|
|
||||||
codes.DeadlineExceeded,
|
|
||||||
}, gax.Backoff{
|
|
||||||
Initial: 100 * time.Millisecond,
|
|
||||||
Max: 60000 * time.Millisecond,
|
|
||||||
Multiplier: 1.30,
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
SignJwt: []gax.CallOption{
|
|
||||||
gax.WithRetry(func() gax.Retryer {
|
|
||||||
return gax.OnCodes([]codes.Code{
|
|
||||||
codes.Unavailable,
|
|
||||||
codes.DeadlineExceeded,
|
|
||||||
}, gax.Backoff{
|
|
||||||
Initial: 100 * time.Millisecond,
|
|
||||||
Max: 60000 * time.Millisecond,
|
|
||||||
Multiplier: 1.30,
|
|
||||||
})
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// internalIamCredentialsClient is an interface that defines the methods available from IAM Service Account Credentials API.
|
|
||||||
type internalIamCredentialsClient interface {
|
|
||||||
Close() error
|
|
||||||
setGoogleClientInfo(...string)
|
|
||||||
Connection() *grpc.ClientConn
|
|
||||||
GenerateAccessToken(context.Context, *credentialspb.GenerateAccessTokenRequest, ...gax.CallOption) (*credentialspb.GenerateAccessTokenResponse, error)
|
|
||||||
GenerateIdToken(context.Context, *credentialspb.GenerateIdTokenRequest, ...gax.CallOption) (*credentialspb.GenerateIdTokenResponse, error)
|
|
||||||
SignBlob(context.Context, *credentialspb.SignBlobRequest, ...gax.CallOption) (*credentialspb.SignBlobResponse, error)
|
|
||||||
SignJwt(context.Context, *credentialspb.SignJwtRequest, ...gax.CallOption) (*credentialspb.SignJwtResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IamCredentialsClient is a client for interacting with IAM Service Account Credentials API.
|
|
||||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
|
||||||
//
|
|
||||||
// A service account is a special type of Google account that belongs to your
|
|
||||||
// application or a virtual machine (VM), instead of to an individual end user.
|
|
||||||
// Your application assumes the identity of the service account to call Google
|
|
||||||
// APIs, so that the users aren’t directly involved.
|
|
||||||
//
|
|
||||||
// Service account credentials are used to temporarily assume the identity
|
|
||||||
// of the service account. Supported credential types include OAuth 2.0 access
|
|
||||||
// tokens, OpenID Connect ID tokens, self-signed JSON Web Tokens (JWTs), and
|
|
||||||
// more.
|
|
||||||
type IamCredentialsClient struct {
|
|
||||||
// The internal transport-dependent client.
|
|
||||||
internalClient internalIamCredentialsClient
|
|
||||||
|
|
||||||
// The call options for this service.
|
|
||||||
CallOptions *IamCredentialsCallOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrapper methods routed to the internal client.
|
|
||||||
|
|
||||||
// Close closes the connection to the API service. The user should invoke this when
|
|
||||||
// the client is no longer required.
|
|
||||||
func (c *IamCredentialsClient) Close() error {
|
|
||||||
return c.internalClient.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// setGoogleClientInfo sets the name and version of the application in
|
|
||||||
// the `x-goog-api-client` header passed on each request. Intended for
|
|
||||||
// use by Google-written clients.
|
|
||||||
func (c *IamCredentialsClient) setGoogleClientInfo(keyval ...string) {
|
|
||||||
c.internalClient.setGoogleClientInfo(keyval...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connection returns a connection to the API service.
|
|
||||||
//
|
|
||||||
// Deprecated: Connections are now pooled so this method does not always
|
|
||||||
// return the same resource.
|
|
||||||
func (c *IamCredentialsClient) Connection() *grpc.ClientConn {
|
|
||||||
return c.internalClient.Connection()
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateAccessToken generates an OAuth 2.0 access token for a service account.
|
|
||||||
func (c *IamCredentialsClient) GenerateAccessToken(ctx context.Context, req *credentialspb.GenerateAccessTokenRequest, opts ...gax.CallOption) (*credentialspb.GenerateAccessTokenResponse, error) {
|
|
||||||
return c.internalClient.GenerateAccessToken(ctx, req, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateIdToken generates an OpenID Connect ID token for a service account.
|
|
||||||
func (c *IamCredentialsClient) GenerateIdToken(ctx context.Context, req *credentialspb.GenerateIdTokenRequest, opts ...gax.CallOption) (*credentialspb.GenerateIdTokenResponse, error) {
|
|
||||||
return c.internalClient.GenerateIdToken(ctx, req, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignBlob signs a blob using a service account’s system-managed private key.
|
|
||||||
func (c *IamCredentialsClient) SignBlob(ctx context.Context, req *credentialspb.SignBlobRequest, opts ...gax.CallOption) (*credentialspb.SignBlobResponse, error) {
|
|
||||||
return c.internalClient.SignBlob(ctx, req, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignJwt signs a JWT using a service account’s system-managed private key.
|
|
||||||
func (c *IamCredentialsClient) SignJwt(ctx context.Context, req *credentialspb.SignJwtRequest, opts ...gax.CallOption) (*credentialspb.SignJwtResponse, error) {
|
|
||||||
return c.internalClient.SignJwt(ctx, req, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// iamCredentialsGRPCClient is a client for interacting with IAM Service Account Credentials API over gRPC transport.
|
|
||||||
//
|
|
||||||
// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
|
|
||||||
type iamCredentialsGRPCClient struct {
|
|
||||||
// Connection pool of gRPC connections to the service.
|
|
||||||
connPool gtransport.ConnPool
|
|
||||||
|
|
||||||
// flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE
|
|
||||||
disableDeadlines bool
|
|
||||||
|
|
||||||
// Points back to the CallOptions field of the containing IamCredentialsClient
|
|
||||||
CallOptions **IamCredentialsCallOptions
|
|
||||||
|
|
||||||
// The gRPC API client.
|
|
||||||
iamCredentialsClient credentialspb.IAMCredentialsClient
|
|
||||||
|
|
||||||
// The x-goog-* metadata to be sent with each request.
|
|
||||||
xGoogMetadata metadata.MD
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIamCredentialsClient creates a new iam credentials client based on gRPC.
|
|
||||||
// The returned client must be Closed when it is done being used to clean up its underlying connections.
|
|
||||||
//
|
|
||||||
// A service account is a special type of Google account that belongs to your
|
|
||||||
// application or a virtual machine (VM), instead of to an individual end user.
|
|
||||||
// Your application assumes the identity of the service account to call Google
|
|
||||||
// APIs, so that the users aren’t directly involved.
|
|
||||||
//
|
|
||||||
// Service account credentials are used to temporarily assume the identity
|
|
||||||
// of the service account. Supported credential types include OAuth 2.0 access
|
|
||||||
// tokens, OpenID Connect ID tokens, self-signed JSON Web Tokens (JWTs), and
|
|
||||||
// more.
|
|
||||||
func NewIamCredentialsClient(ctx context.Context, opts ...option.ClientOption) (*IamCredentialsClient, error) {
|
|
||||||
clientOpts := defaultIamCredentialsGRPCClientOptions()
|
|
||||||
if newIamCredentialsClientHook != nil {
|
|
||||||
hookOpts, err := newIamCredentialsClientHook(ctx, clientHookParams{})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
clientOpts = append(clientOpts, hookOpts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
disableDeadlines, err := checkDisableDeadlines()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client := IamCredentialsClient{CallOptions: defaultIamCredentialsCallOptions()}
|
|
||||||
|
|
||||||
c := &iamCredentialsGRPCClient{
|
|
||||||
connPool: connPool,
|
|
||||||
disableDeadlines: disableDeadlines,
|
|
||||||
iamCredentialsClient: credentialspb.NewIAMCredentialsClient(connPool),
|
|
||||||
CallOptions: &client.CallOptions,
|
|
||||||
}
|
|
||||||
c.setGoogleClientInfo()
|
|
||||||
|
|
||||||
client.internalClient = c
|
|
||||||
|
|
||||||
return &client, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connection returns a connection to the API service.
|
|
||||||
//
|
|
||||||
// Deprecated: Connections are now pooled so this method does not always
|
|
||||||
// return the same resource.
|
|
||||||
func (c *iamCredentialsGRPCClient) Connection() *grpc.ClientConn {
|
|
||||||
return c.connPool.Conn()
|
|
||||||
}
|
|
||||||
|
|
||||||
// setGoogleClientInfo sets the name and version of the application in
|
|
||||||
// the `x-goog-api-client` header passed on each request. Intended for
|
|
||||||
// use by Google-written clients.
|
|
||||||
func (c *iamCredentialsGRPCClient) setGoogleClientInfo(keyval ...string) {
|
|
||||||
kv := append([]string{"gl-go", versionGo()}, keyval...)
|
|
||||||
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
|
|
||||||
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the connection to the API service. The user should invoke this when
|
|
||||||
// the client is no longer required.
|
|
||||||
func (c *iamCredentialsGRPCClient) Close() error {
|
|
||||||
return c.connPool.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamCredentialsGRPCClient) GenerateAccessToken(ctx context.Context, req *credentialspb.GenerateAccessTokenRequest, opts ...gax.CallOption) (*credentialspb.GenerateAccessTokenResponse, error) {
|
|
||||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
|
||||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
|
||||||
defer cancel()
|
|
||||||
ctx = cctx
|
|
||||||
}
|
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
|
||||||
|
|
||||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
|
||||||
opts = append((*c.CallOptions).GenerateAccessToken[0:len((*c.CallOptions).GenerateAccessToken):len((*c.CallOptions).GenerateAccessToken)], opts...)
|
|
||||||
var resp *credentialspb.GenerateAccessTokenResponse
|
|
||||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
|
||||||
var err error
|
|
||||||
resp, err = c.iamCredentialsClient.GenerateAccessToken(ctx, req, settings.GRPC...)
|
|
||||||
return err
|
|
||||||
}, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamCredentialsGRPCClient) GenerateIdToken(ctx context.Context, req *credentialspb.GenerateIdTokenRequest, opts ...gax.CallOption) (*credentialspb.GenerateIdTokenResponse, error) {
|
|
||||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
|
||||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
|
||||||
defer cancel()
|
|
||||||
ctx = cctx
|
|
||||||
}
|
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
|
||||||
|
|
||||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
|
||||||
opts = append((*c.CallOptions).GenerateIdToken[0:len((*c.CallOptions).GenerateIdToken):len((*c.CallOptions).GenerateIdToken)], opts...)
|
|
||||||
var resp *credentialspb.GenerateIdTokenResponse
|
|
||||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
|
||||||
var err error
|
|
||||||
resp, err = c.iamCredentialsClient.GenerateIdToken(ctx, req, settings.GRPC...)
|
|
||||||
return err
|
|
||||||
}, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamCredentialsGRPCClient) SignBlob(ctx context.Context, req *credentialspb.SignBlobRequest, opts ...gax.CallOption) (*credentialspb.SignBlobResponse, error) {
|
|
||||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
|
||||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
|
||||||
defer cancel()
|
|
||||||
ctx = cctx
|
|
||||||
}
|
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
|
||||||
|
|
||||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
|
||||||
opts = append((*c.CallOptions).SignBlob[0:len((*c.CallOptions).SignBlob):len((*c.CallOptions).SignBlob)], opts...)
|
|
||||||
var resp *credentialspb.SignBlobResponse
|
|
||||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
|
||||||
var err error
|
|
||||||
resp, err = c.iamCredentialsClient.SignBlob(ctx, req, settings.GRPC...)
|
|
||||||
return err
|
|
||||||
}, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamCredentialsGRPCClient) SignJwt(ctx context.Context, req *credentialspb.SignJwtRequest, opts ...gax.CallOption) (*credentialspb.SignJwtResponse, error) {
|
|
||||||
if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines {
|
|
||||||
cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond)
|
|
||||||
defer cancel()
|
|
||||||
ctx = cctx
|
|
||||||
}
|
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName())))
|
|
||||||
|
|
||||||
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
|
|
||||||
opts = append((*c.CallOptions).SignJwt[0:len((*c.CallOptions).SignJwt):len((*c.CallOptions).SignJwt)], opts...)
|
|
||||||
var resp *credentialspb.SignJwtResponse
|
|
||||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
|
||||||
var err error
|
|
||||||
resp, err = c.iamCredentialsClient.SignJwt(ctx, req, settings.GRPC...)
|
|
||||||
return err
|
|
||||||
}, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return resp, nil
|
|
||||||
}
|
|
||||||
23
vendor/cloud.google.com/go/iam/credentials/apiv1/version.go
generated
vendored
23
vendor/cloud.google.com/go/iam/credentials/apiv1/version.go
generated
vendored
|
|
@ -1,23 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by gapicgen. DO NOT EDIT.
|
|
||||||
|
|
||||||
package credentials
|
|
||||||
|
|
||||||
import "cloud.google.com/go/iam/internal"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
versionClient = internal.Version
|
|
||||||
}
|
|
||||||
387
vendor/cloud.google.com/go/iam/iam.go
generated
vendored
387
vendor/cloud.google.com/go/iam/iam.go
generated
vendored
|
|
@ -1,387 +0,0 @@
|
||||||
// Copyright 2016 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package iam supports the resource-specific operations of Google Cloud
|
|
||||||
// IAM (Identity and Access Management) for the Google Cloud Libraries.
|
|
||||||
// See https://cloud.google.com/iam for more about IAM.
|
|
||||||
//
|
|
||||||
// Users of the Google Cloud Libraries will typically not use this package
|
|
||||||
// directly. Instead they will begin with some resource that supports IAM, like
|
|
||||||
// a pubsub topic, and call its IAM method to get a Handle for that resource.
|
|
||||||
package iam
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
gax "github.com/googleapis/gax-go/v2"
|
|
||||||
pb "google.golang.org/genproto/googleapis/iam/v1"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
// client abstracts the IAMPolicy API to allow multiple implementations.
|
|
||||||
type client interface {
|
|
||||||
Get(ctx context.Context, resource string) (*pb.Policy, error)
|
|
||||||
Set(ctx context.Context, resource string, p *pb.Policy) error
|
|
||||||
Test(ctx context.Context, resource string, perms []string) ([]string, error)
|
|
||||||
GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// grpcClient implements client for the standard gRPC-based IAMPolicy service.
|
|
||||||
type grpcClient struct {
|
|
||||||
c pb.IAMPolicyClient
|
|
||||||
}
|
|
||||||
|
|
||||||
var withRetry = gax.WithRetry(func() gax.Retryer {
|
|
||||||
return gax.OnCodes([]codes.Code{
|
|
||||||
codes.DeadlineExceeded,
|
|
||||||
codes.Unavailable,
|
|
||||||
}, gax.Backoff{
|
|
||||||
Initial: 100 * time.Millisecond,
|
|
||||||
Max: 60 * time.Second,
|
|
||||||
Multiplier: 1.3,
|
|
||||||
})
|
|
||||||
})
|
|
||||||
|
|
||||||
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
|
|
||||||
return g.GetWithVersion(ctx, resource, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) {
|
|
||||||
var proto *pb.Policy
|
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
|
||||||
ctx = insertMetadata(ctx, md)
|
|
||||||
|
|
||||||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
|
||||||
var err error
|
|
||||||
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{
|
|
||||||
Resource: resource,
|
|
||||||
Options: &pb.GetPolicyOptions{
|
|
||||||
RequestedPolicyVersion: requestedPolicyVersion,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}, withRetry)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return proto, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
|
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
|
||||||
ctx = insertMetadata(ctx, md)
|
|
||||||
|
|
||||||
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
|
||||||
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
|
|
||||||
Resource: resource,
|
|
||||||
Policy: p,
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}, withRetry)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
|
|
||||||
var res *pb.TestIamPermissionsResponse
|
|
||||||
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
|
|
||||||
ctx = insertMetadata(ctx, md)
|
|
||||||
|
|
||||||
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
|
|
||||||
var err error
|
|
||||||
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
|
|
||||||
Resource: resource,
|
|
||||||
Permissions: perms,
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}, withRetry)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return res.Permissions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Handle provides IAM operations for a resource.
|
|
||||||
type Handle struct {
|
|
||||||
c client
|
|
||||||
resource string
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions).
|
|
||||||
type Handle3 struct {
|
|
||||||
c client
|
|
||||||
resource string
|
|
||||||
version int32
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalNewHandle is for use by the Google Cloud Libraries only.
|
|
||||||
//
|
|
||||||
// InternalNewHandle returns a Handle for resource.
|
|
||||||
// The conn parameter refers to a server that must support the IAMPolicy service.
|
|
||||||
func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle {
|
|
||||||
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
|
|
||||||
//
|
|
||||||
// InternalNewHandleClient returns a Handle for resource using the given
|
|
||||||
// grpc service that implements IAM as a mixin
|
|
||||||
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
|
|
||||||
return InternalNewHandleClient(&grpcClient{c: c}, resource)
|
|
||||||
}
|
|
||||||
|
|
||||||
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
|
|
||||||
//
|
|
||||||
// InternalNewHandleClient returns a Handle for resource using the given
|
|
||||||
// client implementation.
|
|
||||||
func InternalNewHandleClient(c client, resource string) *Handle {
|
|
||||||
return &Handle{
|
|
||||||
c: c,
|
|
||||||
resource: resource,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// V3 returns a Handle3, which is like Handle except it sets
|
|
||||||
// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3
|
|
||||||
// when storing a policy.
|
|
||||||
func (h *Handle) V3() *Handle3 {
|
|
||||||
return &Handle3{
|
|
||||||
c: h.c,
|
|
||||||
resource: h.resource,
|
|
||||||
version: 3,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Policy retrieves the IAM policy for the resource.
|
|
||||||
func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
|
|
||||||
proto, err := h.c.Get(ctx, h.resource)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Policy{InternalProto: proto}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
|
||||||
//
|
|
||||||
// If policy was created from a prior call to Get, then the modification will
|
|
||||||
// only succeed if the policy has not changed since the Get.
|
|
||||||
func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
|
|
||||||
return h.c.Set(ctx, h.resource, policy.InternalProto)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
|
||||||
func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
|
||||||
return h.c.Test(ctx, h.resource, permissions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A RoleName is a name representing a collection of permissions.
|
|
||||||
type RoleName string
|
|
||||||
|
|
||||||
// Common role names.
|
|
||||||
const (
|
|
||||||
Owner RoleName = "roles/owner"
|
|
||||||
Editor RoleName = "roles/editor"
|
|
||||||
Viewer RoleName = "roles/viewer"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// AllUsers is a special member that denotes all users, even unauthenticated ones.
|
|
||||||
AllUsers = "allUsers"
|
|
||||||
|
|
||||||
// AllAuthenticatedUsers is a special member that denotes all authenticated users.
|
|
||||||
AllAuthenticatedUsers = "allAuthenticatedUsers"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Policy is a list of Bindings representing roles
|
|
||||||
// granted to members.
|
|
||||||
//
|
|
||||||
// The zero Policy is a valid policy with no bindings.
|
|
||||||
type Policy struct {
|
|
||||||
// TODO(jba): when type aliases are available, put Policy into an internal package
|
|
||||||
// and provide an exported alias here.
|
|
||||||
|
|
||||||
// This field is exported for use by the Google Cloud Libraries only.
|
|
||||||
// It may become unexported in a future release.
|
|
||||||
InternalProto *pb.Policy
|
|
||||||
}
|
|
||||||
|
|
||||||
// Members returns the list of members with the supplied role.
|
|
||||||
// The return value should not be modified. Use Add and Remove
|
|
||||||
// to modify the members of a role.
|
|
||||||
func (p *Policy) Members(r RoleName) []string {
|
|
||||||
b := p.binding(r)
|
|
||||||
if b == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return b.Members
|
|
||||||
}
|
|
||||||
|
|
||||||
// HasRole reports whether member has role r.
|
|
||||||
func (p *Policy) HasRole(member string, r RoleName) bool {
|
|
||||||
return memberIndex(member, p.binding(r)) >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds member member to role r if it is not already present.
|
|
||||||
// A new binding is created if there is no binding for the role.
|
|
||||||
func (p *Policy) Add(member string, r RoleName) {
|
|
||||||
b := p.binding(r)
|
|
||||||
if b == nil {
|
|
||||||
if p.InternalProto == nil {
|
|
||||||
p.InternalProto = &pb.Policy{}
|
|
||||||
}
|
|
||||||
p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
|
|
||||||
Role: string(r),
|
|
||||||
Members: []string{member},
|
|
||||||
})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if memberIndex(member, b) < 0 {
|
|
||||||
b.Members = append(b.Members, member)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes member from role r if it is present.
|
|
||||||
func (p *Policy) Remove(member string, r RoleName) {
|
|
||||||
bi := p.bindingIndex(r)
|
|
||||||
if bi < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
bindings := p.InternalProto.Bindings
|
|
||||||
b := bindings[bi]
|
|
||||||
mi := memberIndex(member, b)
|
|
||||||
if mi < 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Order doesn't matter for bindings or members, so to remove, move the last item
|
|
||||||
// into the removed spot and shrink the slice.
|
|
||||||
if len(b.Members) == 1 {
|
|
||||||
// Remove binding.
|
|
||||||
last := len(bindings) - 1
|
|
||||||
bindings[bi] = bindings[last]
|
|
||||||
bindings[last] = nil
|
|
||||||
p.InternalProto.Bindings = bindings[:last]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Remove member.
|
|
||||||
// TODO(jba): worry about multiple copies of m?
|
|
||||||
last := len(b.Members) - 1
|
|
||||||
b.Members[mi] = b.Members[last]
|
|
||||||
b.Members[last] = ""
|
|
||||||
b.Members = b.Members[:last]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Roles returns the names of all the roles that appear in the Policy.
|
|
||||||
func (p *Policy) Roles() []RoleName {
|
|
||||||
if p.InternalProto == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var rns []RoleName
|
|
||||||
for _, b := range p.InternalProto.Bindings {
|
|
||||||
rns = append(rns, RoleName(b.Role))
|
|
||||||
}
|
|
||||||
return rns
|
|
||||||
}
|
|
||||||
|
|
||||||
// binding returns the Binding for the suppied role, or nil if there isn't one.
|
|
||||||
func (p *Policy) binding(r RoleName) *pb.Binding {
|
|
||||||
i := p.bindingIndex(r)
|
|
||||||
if i < 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return p.InternalProto.Bindings[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Policy) bindingIndex(r RoleName) int {
|
|
||||||
if p.InternalProto == nil {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
for i, b := range p.InternalProto.Bindings {
|
|
||||||
if b.Role == string(r) {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// memberIndex returns the index of m in b's Members, or -1 if not found.
|
|
||||||
func memberIndex(m string, b *pb.Binding) int {
|
|
||||||
if b == nil {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
for i, mm := range b.Members {
|
|
||||||
if mm == m {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
// insertMetadata inserts metadata into the given context
|
|
||||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
|
||||||
out, _ := metadata.FromOutgoingContext(ctx)
|
|
||||||
out = out.Copy()
|
|
||||||
for _, md := range mds {
|
|
||||||
for k, v := range md {
|
|
||||||
out[k] = append(out[k], v...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return metadata.NewOutgoingContext(ctx, out)
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Policy3 is a list of Bindings representing roles granted to members.
|
|
||||||
//
|
|
||||||
// The zero Policy3 is a valid policy with no bindings.
|
|
||||||
//
|
|
||||||
// It is similar to a Policy, except a Policy3 provides direct access to the
|
|
||||||
// list of Bindings.
|
|
||||||
//
|
|
||||||
// The policy version is always set to 3.
|
|
||||||
type Policy3 struct {
|
|
||||||
etag []byte
|
|
||||||
Bindings []*pb.Binding
|
|
||||||
}
|
|
||||||
|
|
||||||
// Policy retrieves the IAM policy for the resource.
|
|
||||||
//
|
|
||||||
// requestedPolicyVersion is always set to 3.
|
|
||||||
func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) {
|
|
||||||
proto, err := h.c.GetWithVersion(ctx, h.resource, h.version)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Policy3{
|
|
||||||
Bindings: proto.Bindings,
|
|
||||||
etag: proto.Etag,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPolicy replaces the resource's current policy with the supplied Policy.
|
|
||||||
//
|
|
||||||
// If policy was created from a prior call to Get, then the modification will
|
|
||||||
// only succeed if the policy has not changed since the Get.
|
|
||||||
func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error {
|
|
||||||
return h.c.Set(ctx, h.resource, &pb.Policy{
|
|
||||||
Bindings: policy.Bindings,
|
|
||||||
Etag: policy.etag,
|
|
||||||
Version: h.version,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestPermissions returns the subset of permissions that the caller has on the resource.
|
|
||||||
func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
|
|
||||||
return h.c.Test(ctx, h.resource, permissions)
|
|
||||||
}
|
|
||||||
18
vendor/cloud.google.com/go/iam/internal/version.go
generated
vendored
18
vendor/cloud.google.com/go/iam/internal/version.go
generated
vendored
|
|
@ -1,18 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
// Version is the current tagged release of the library.
|
|
||||||
const Version = "0.8.0"
|
|
||||||
1946
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
1946
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
File diff suppressed because it is too large
Load diff
18
vendor/cloud.google.com/go/internal/README.md
generated
vendored
18
vendor/cloud.google.com/go/internal/README.md
generated
vendored
|
|
@ -1,18 +0,0 @@
|
||||||
# Internal
|
|
||||||
|
|
||||||
This directory contains internal code for cloud.google.com/go packages.
|
|
||||||
|
|
||||||
## .repo-metadata-full.json
|
|
||||||
|
|
||||||
`.repo-metadata-full.json` contains metadata about the packages in this repo. It
|
|
||||||
is generated by `internal/gapicgen/generator`. It's processed by external tools
|
|
||||||
to build lists of all of the packages.
|
|
||||||
|
|
||||||
Don't make breaking changes to the format without consulting with the external
|
|
||||||
tools.
|
|
||||||
|
|
||||||
One day, we may want to create individual `.repo-metadata.json` files next to
|
|
||||||
each package, which is the pattern followed by some other languages. External
|
|
||||||
tools would then talk to pkg.go.dev or some other service to get the overall
|
|
||||||
list of packages and use the `.repo-metadata.json` files to get the additional
|
|
||||||
metadata required. For now, `.repo-metadata-full.json` includes everything.
|
|
||||||
55
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
55
vendor/cloud.google.com/go/internal/annotate.go
generated
vendored
|
|
@ -1,55 +0,0 @@
|
||||||
// Copyright 2017 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Annotate prepends msg to the error message in err, attempting
|
|
||||||
// to preserve other information in err, like an error code.
|
|
||||||
//
|
|
||||||
// Annotate panics if err is nil.
|
|
||||||
//
|
|
||||||
// Annotate knows about these error types:
|
|
||||||
// - "google.golang.org/grpc/status".Status
|
|
||||||
// - "google.golang.org/api/googleapi".Error
|
|
||||||
// If the error is not one of these types, Annotate behaves
|
|
||||||
// like
|
|
||||||
//
|
|
||||||
// fmt.Errorf("%s: %v", msg, err)
|
|
||||||
func Annotate(err error, msg string) error {
|
|
||||||
if err == nil {
|
|
||||||
panic("Annotate called with nil")
|
|
||||||
}
|
|
||||||
if s, ok := status.FromError(err); ok {
|
|
||||||
p := s.Proto()
|
|
||||||
p.Message = msg + ": " + p.Message
|
|
||||||
return status.ErrorProto(p)
|
|
||||||
}
|
|
||||||
if g, ok := err.(*googleapi.Error); ok {
|
|
||||||
g.Message = msg + ": " + g.Message
|
|
||||||
return g
|
|
||||||
}
|
|
||||||
return fmt.Errorf("%s: %v", msg, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Annotatef uses format and args to format a string, then calls Annotate.
|
|
||||||
func Annotatef(err error, format string, args ...interface{}) error {
|
|
||||||
return Annotate(err, fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
108
vendor/cloud.google.com/go/internal/optional/optional.go
generated
vendored
108
vendor/cloud.google.com/go/internal/optional/optional.go
generated
vendored
|
|
@ -1,108 +0,0 @@
|
||||||
// Copyright 2016 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package optional provides versions of primitive types that can
|
|
||||||
// be nil. These are useful in methods that update some of an API object's
|
|
||||||
// fields.
|
|
||||||
package optional
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// Bool is either a bool or nil.
|
|
||||||
Bool interface{}
|
|
||||||
|
|
||||||
// String is either a string or nil.
|
|
||||||
String interface{}
|
|
||||||
|
|
||||||
// Int is either an int or nil.
|
|
||||||
Int interface{}
|
|
||||||
|
|
||||||
// Uint is either a uint or nil.
|
|
||||||
Uint interface{}
|
|
||||||
|
|
||||||
// Float64 is either a float64 or nil.
|
|
||||||
Float64 interface{}
|
|
||||||
|
|
||||||
// Duration is either a time.Duration or nil.
|
|
||||||
Duration interface{}
|
|
||||||
)
|
|
||||||
|
|
||||||
// ToBool returns its argument as a bool.
|
|
||||||
// It panics if its argument is nil or not a bool.
|
|
||||||
func ToBool(v Bool) bool {
|
|
||||||
x, ok := v.(bool)
|
|
||||||
if !ok {
|
|
||||||
doPanic("Bool", v)
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToString returns its argument as a string.
|
|
||||||
// It panics if its argument is nil or not a string.
|
|
||||||
func ToString(v String) string {
|
|
||||||
x, ok := v.(string)
|
|
||||||
if !ok {
|
|
||||||
doPanic("String", v)
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToInt returns its argument as an int.
|
|
||||||
// It panics if its argument is nil or not an int.
|
|
||||||
func ToInt(v Int) int {
|
|
||||||
x, ok := v.(int)
|
|
||||||
if !ok {
|
|
||||||
doPanic("Int", v)
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToUint returns its argument as a uint.
|
|
||||||
// It panics if its argument is nil or not a uint.
|
|
||||||
func ToUint(v Uint) uint {
|
|
||||||
x, ok := v.(uint)
|
|
||||||
if !ok {
|
|
||||||
doPanic("Uint", v)
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToFloat64 returns its argument as a float64.
|
|
||||||
// It panics if its argument is nil or not a float64.
|
|
||||||
func ToFloat64(v Float64) float64 {
|
|
||||||
x, ok := v.(float64)
|
|
||||||
if !ok {
|
|
||||||
doPanic("Float64", v)
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToDuration returns its argument as a time.Duration.
|
|
||||||
// It panics if its argument is nil or not a time.Duration.
|
|
||||||
func ToDuration(v Duration) time.Duration {
|
|
||||||
x, ok := v.(time.Duration)
|
|
||||||
if !ok {
|
|
||||||
doPanic("Duration", v)
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
||||||
|
|
||||||
func doPanic(capType string, v interface{}) {
|
|
||||||
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
|
|
||||||
}
|
|
||||||
85
vendor/cloud.google.com/go/internal/retry.go
generated
vendored
85
vendor/cloud.google.com/go/internal/retry.go
generated
vendored
|
|
@ -1,85 +0,0 @@
|
||||||
// Copyright 2016 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
gax "github.com/googleapis/gax-go/v2"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Retry calls the supplied function f repeatedly according to the provided
|
|
||||||
// backoff parameters. It returns when one of the following occurs:
|
|
||||||
// When f's first return value is true, Retry immediately returns with f's second
|
|
||||||
// return value.
|
|
||||||
// When the provided context is done, Retry returns with an error that
|
|
||||||
// includes both ctx.Error() and the last error returned by f.
|
|
||||||
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
|
|
||||||
return retry(ctx, bo, f, gax.Sleep)
|
|
||||||
}
|
|
||||||
|
|
||||||
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
|
|
||||||
sleep func(context.Context, time.Duration) error) error {
|
|
||||||
var lastErr error
|
|
||||||
for {
|
|
||||||
stop, err := f()
|
|
||||||
if stop {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Remember the last "real" error from f.
|
|
||||||
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
|
|
||||||
lastErr = err
|
|
||||||
}
|
|
||||||
p := bo.Pause()
|
|
||||||
if ctxErr := sleep(ctx, p); ctxErr != nil {
|
|
||||||
if lastErr != nil {
|
|
||||||
return wrappedCallErr{ctxErr: ctxErr, wrappedErr: lastErr}
|
|
||||||
}
|
|
||||||
return ctxErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Use this error type to return an error which allows introspection of both
|
|
||||||
// the context error and the error from the service.
|
|
||||||
type wrappedCallErr struct {
|
|
||||||
ctxErr error
|
|
||||||
wrappedErr error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e wrappedCallErr) Error() string {
|
|
||||||
return fmt.Sprintf("retry failed with %v; last error: %v", e.ctxErr, e.wrappedErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e wrappedCallErr) Unwrap() error {
|
|
||||||
return e.wrappedErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is allows errors.Is to match the error from the call as well as context
|
|
||||||
// sentinel errors.
|
|
||||||
func (e wrappedCallErr) Is(err error) bool {
|
|
||||||
return e.ctxErr == err || e.wrappedErr == err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GRPCStatus allows the wrapped error to be used with status.FromError.
|
|
||||||
func (e wrappedCallErr) GRPCStatus() *status.Status {
|
|
||||||
if s, ok := status.FromError(e.wrappedErr); ok {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
111
vendor/cloud.google.com/go/internal/trace/trace.go
generated
vendored
111
vendor/cloud.google.com/go/internal/trace/trace.go
generated
vendored
|
|
@ -1,111 +0,0 @@
|
||||||
// Copyright 2018 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package trace
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"go.opencensus.io/trace"
|
|
||||||
"golang.org/x/xerrors"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
"google.golang.org/genproto/googleapis/rpc/code"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// StartSpan adds a span to the trace with the given name.
|
|
||||||
func StartSpan(ctx context.Context, name string) context.Context {
|
|
||||||
ctx, _ = trace.StartSpan(ctx, name)
|
|
||||||
return ctx
|
|
||||||
}
|
|
||||||
|
|
||||||
// EndSpan ends a span with the given error.
|
|
||||||
func EndSpan(ctx context.Context, err error) {
|
|
||||||
span := trace.FromContext(ctx)
|
|
||||||
if err != nil {
|
|
||||||
span.SetStatus(toStatus(err))
|
|
||||||
}
|
|
||||||
span.End()
|
|
||||||
}
|
|
||||||
|
|
||||||
// toStatus interrogates an error and converts it to an appropriate
|
|
||||||
// OpenCensus status.
|
|
||||||
func toStatus(err error) trace.Status {
|
|
||||||
var err2 *googleapi.Error
|
|
||||||
if ok := xerrors.As(err, &err2); ok {
|
|
||||||
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
|
|
||||||
} else if s, ok := status.FromError(err); ok {
|
|
||||||
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
|
|
||||||
} else {
|
|
||||||
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(deklerk): switch to using OpenCensus function when it becomes available.
|
|
||||||
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
|
|
||||||
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
|
|
||||||
switch httpStatusCode {
|
|
||||||
case 200:
|
|
||||||
return int32(code.Code_OK)
|
|
||||||
case 499:
|
|
||||||
return int32(code.Code_CANCELLED)
|
|
||||||
case 500:
|
|
||||||
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
|
|
||||||
case 400:
|
|
||||||
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
|
|
||||||
case 504:
|
|
||||||
return int32(code.Code_DEADLINE_EXCEEDED)
|
|
||||||
case 404:
|
|
||||||
return int32(code.Code_NOT_FOUND)
|
|
||||||
case 409:
|
|
||||||
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
|
|
||||||
case 403:
|
|
||||||
return int32(code.Code_PERMISSION_DENIED)
|
|
||||||
case 401:
|
|
||||||
return int32(code.Code_UNAUTHENTICATED)
|
|
||||||
case 429:
|
|
||||||
return int32(code.Code_RESOURCE_EXHAUSTED)
|
|
||||||
case 501:
|
|
||||||
return int32(code.Code_UNIMPLEMENTED)
|
|
||||||
case 503:
|
|
||||||
return int32(code.Code_UNAVAILABLE)
|
|
||||||
default:
|
|
||||||
return int32(code.Code_UNKNOWN)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: (odeke-em): perhaps just pass around spans due to the cost
|
|
||||||
// incurred from using trace.FromContext(ctx) yet we could avoid
|
|
||||||
// throwing away the work done by ctx, span := trace.StartSpan.
|
|
||||||
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
|
|
||||||
var attrs []trace.Attribute
|
|
||||||
for k, v := range attrMap {
|
|
||||||
var a trace.Attribute
|
|
||||||
switch v := v.(type) {
|
|
||||||
case string:
|
|
||||||
a = trace.StringAttribute(k, v)
|
|
||||||
case bool:
|
|
||||||
a = trace.BoolAttribute(k, v)
|
|
||||||
case int:
|
|
||||||
a = trace.Int64Attribute(k, int64(v))
|
|
||||||
case int64:
|
|
||||||
a = trace.Int64Attribute(k, v)
|
|
||||||
default:
|
|
||||||
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
|
|
||||||
}
|
|
||||||
attrs = append(attrs, a)
|
|
||||||
}
|
|
||||||
trace.FromContext(ctx).Annotatef(attrs, format, args...)
|
|
||||||
}
|
|
||||||
19
vendor/cloud.google.com/go/internal/version/update_version.sh
generated
vendored
19
vendor/cloud.google.com/go/internal/version/update_version.sh
generated
vendored
|
|
@ -1,19 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
# Copyright 2019 Google LLC
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
today=$(date +%Y%m%d)
|
|
||||||
|
|
||||||
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
|
|
||||||
|
|
||||||
71
vendor/cloud.google.com/go/internal/version/version.go
generated
vendored
71
vendor/cloud.google.com/go/internal/version/version.go
generated
vendored
|
|
@ -1,71 +0,0 @@
|
||||||
// Copyright 2016 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//go:generate ./update_version.sh
|
|
||||||
|
|
||||||
// Package version contains version information for Google Cloud Client
|
|
||||||
// Libraries for Go, as reported in request headers.
|
|
||||||
package version
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Repo is the current version of the client libraries in this
|
|
||||||
// repo. It should be a date in YYYYMMDD format.
|
|
||||||
const Repo = "20201104"
|
|
||||||
|
|
||||||
// Go returns the Go runtime version. The returned string
|
|
||||||
// has no whitespace.
|
|
||||||
func Go() string {
|
|
||||||
return goVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
var goVersion = goVer(runtime.Version())
|
|
||||||
|
|
||||||
const develPrefix = "devel +"
|
|
||||||
|
|
||||||
func goVer(s string) string {
|
|
||||||
if strings.HasPrefix(s, develPrefix) {
|
|
||||||
s = s[len(develPrefix):]
|
|
||||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
|
||||||
s = s[:p]
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(s, "go1") {
|
|
||||||
s = s[2:]
|
|
||||||
var prerelease string
|
|
||||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
|
||||||
s, prerelease = s[:p], s[p:]
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(s, ".") {
|
|
||||||
s += "0"
|
|
||||||
} else if strings.Count(s, ".") < 2 {
|
|
||||||
s += ".0"
|
|
||||||
}
|
|
||||||
if prerelease != "" {
|
|
||||||
s += "-" + prerelease
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func notSemverRune(r rune) bool {
|
|
||||||
return !strings.ContainsRune("0123456789.", r)
|
|
||||||
}
|
|
||||||
3
vendor/cloud.google.com/go/storage/.release-please-manifest.json
generated
vendored
3
vendor/cloud.google.com/go/storage/.release-please-manifest.json
generated
vendored
|
|
@ -1,3 +0,0 @@
|
||||||
{
|
|
||||||
"storage": "1.27.0"
|
|
||||||
}
|
|
||||||
304
vendor/cloud.google.com/go/storage/CHANGES.md
generated
vendored
304
vendor/cloud.google.com/go/storage/CHANGES.md
generated
vendored
|
|
@ -1,304 +0,0 @@
|
||||||
# Changes
|
|
||||||
|
|
||||||
|
|
||||||
## [1.27.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.26.0...storage/v1.27.0) (2022-09-22)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** Find GoogleAccessID when using impersonated creds ([#6591](https://github.com/googleapis/google-cloud-go/issues/6591)) ([a2d16a7](https://github.com/googleapis/google-cloud-go/commit/a2d16a7a778c85d13217fc67955ec5dac1da34e8))
|
|
||||||
|
|
||||||
## [1.26.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.25.0...storage/v1.26.0) (2022-08-29)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** export ShouldRetry ([#6370](https://github.com/googleapis/google-cloud-go/issues/6370)) ([0da9ab0](https://github.com/googleapis/google-cloud-go/commit/0da9ab0831540569dc04c0a23437b084b1564e15)), refs [#6362](https://github.com/googleapis/google-cloud-go/issues/6362)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** allow to use age=0 in OLM conditions ([#6204](https://github.com/googleapis/google-cloud-go/issues/6204)) ([c85704f](https://github.com/googleapis/google-cloud-go/commit/c85704f4284626ce728cb48f3b130f2ce2a0165e))
|
|
||||||
|
|
||||||
## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f))
|
|
||||||
* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e))
|
|
||||||
|
|
||||||
## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** add Custom Placement Config Dual Region Support ([#6294](https://github.com/googleapis/google-cloud-go/issues/6294)) ([5a8c607](https://github.com/googleapis/google-cloud-go/commit/5a8c607e3a9a3265887e27cb13f8943f3e3fa23d))
|
|
||||||
|
|
||||||
## [1.23.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.1...storage/v1.23.0) (2022-06-23)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** add support for OLM Prefix/Suffix ([#5929](https://github.com/googleapis/google-cloud-go/issues/5929)) ([ec21d10](https://github.com/googleapis/google-cloud-go/commit/ec21d10d6d1b01aa97a52560319775041707690d))
|
|
||||||
* **storage:** support AbortIncompleteMultipartUpload LifecycleAction ([#5812](https://github.com/googleapis/google-cloud-go/issues/5812)) ([fdec929](https://github.com/googleapis/google-cloud-go/commit/fdec929b9da6e01dda0ab3c72544d44d6bd82bd4)), refs [#5795](https://github.com/googleapis/google-cloud-go/issues/5795)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** allow for Age *int64 type and int64 type ([#6230](https://github.com/googleapis/google-cloud-go/issues/6230)) ([cc7acb8](https://github.com/googleapis/google-cloud-go/commit/cc7acb8bffb31828e9e96d4834a65f9728494473))
|
|
||||||
|
|
||||||
### [1.22.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.22.0...storage/v1.22.1) (2022-05-19)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** bump genproto, remove deadcode ([#6059](https://github.com/googleapis/google-cloud-go/issues/6059)) ([bb10f9f](https://github.com/googleapis/google-cloud-go/commit/bb10f9faca57dc3b987e0fb601090887b3507f07))
|
|
||||||
* **storage:** remove field that no longer exists ([#6061](https://github.com/googleapis/google-cloud-go/issues/6061)) ([ee150cf](https://github.com/googleapis/google-cloud-go/commit/ee150cfd194463ddfcb59898cfb0237e47777973))
|
|
||||||
|
|
||||||
## [1.22.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.21.0...storage/v1.22.0) (2022-03-31)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** allow specifying includeTrailingDelimiter ([#5617](https://github.com/googleapis/google-cloud-go/issues/5617)) ([a34503b](https://github.com/googleapis/google-cloud-go/commit/a34503bc0f0b95399285e8db66976b227e3b0072))
|
|
||||||
* **storage:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** respect STORAGE_EMULATOR_HOST in signedURL ([#5673](https://github.com/googleapis/google-cloud-go/issues/5673)) ([1c249ae](https://github.com/googleapis/google-cloud-go/commit/1c249ae5b4980cf53fa74635943ca8bf6a96a341))
|
|
||||||
|
|
||||||
## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749)
|
|
||||||
* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e))
|
|
||||||
|
|
||||||
## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage/internal:** Update definition of RewriteObjectRequest to bring to parity with JSON API support ([#5447](https://www.github.com/googleapis/google-cloud-go/issues/5447)) ([7d175ef](https://www.github.com/googleapis/google-cloud-go/commit/7d175ef12b7b3e75585427f5dd2aab4a175e92d6))
|
|
||||||
|
|
||||||
## [1.19.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.2...storage/v1.19.0) (2022-01-25)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** add fully configurable and idempotency-aware retry strategy ([#5384](https://www.github.com/googleapis/google-cloud-go/issues/5384), [#5185](https://www.github.com/googleapis/google-cloud-go/issues/5185), [#5170](https://www.github.com/googleapis/google-cloud-go/issues/5170), [#5223](https://www.github.com/googleapis/google-cloud-go/issues/5223), [#5221](https://www.github.com/googleapis/google-cloud-go/issues/5221), [#5193](https://www.github.com/googleapis/google-cloud-go/issues/5193), [#5159](https://www.github.com/googleapis/google-cloud-go/issues/5159), [#5165](https://www.github.com/googleapis/google-cloud-go/issues/5165), [#5166](https://www.github.com/googleapis/google-cloud-go/issues/5166), [#5210](https://www.github.com/googleapis/google-cloud-go/issues/5210), [#5172](https://www.github.com/googleapis/google-cloud-go/issues/5172), [#5314](https://www.github.com/googleapis/google-cloud-go/issues/5314))
|
|
||||||
* This release contains changes to fully align this library's retry strategy
|
|
||||||
with best practices as described in the
|
|
||||||
Cloud Storage [docs](https://cloud.google.com/storage/docs/retry-strategy).
|
|
||||||
* The library will now retry only idempotent operations by default. This means
|
|
||||||
that for certain operations, including object upload, compose, rewrite,
|
|
||||||
update, and delete, requests will not be retried by default unless
|
|
||||||
[idempotency conditions](https://cloud.google.com/storage/docs/retry-strategy#idempotency)
|
|
||||||
for the request have been met.
|
|
||||||
* The library now has methods to configure aspects of retry policy for
|
|
||||||
API calls, including which errors are retried, the timing of the
|
|
||||||
exponential backoff, and how idempotency is taken into account.
|
|
||||||
* If you wish to re-enable retries for a non-idempotent request, use the
|
|
||||||
[RetryAlways](https://pkg.go.dev/cloud.google.com/go/storage@main#RetryAlways)
|
|
||||||
policy.
|
|
||||||
* For full details on how to configure retries, see the
|
|
||||||
[package docs](https://pkg.go.dev/cloud.google.com/go/storage@main#hdr-Retrying_failed_requests)
|
|
||||||
and the
|
|
||||||
[Cloud Storage docs](https://cloud.google.com/storage/docs/retry-strategy)
|
|
||||||
* **storage:** GenerateSignedPostPolicyV4 can use existing creds to authenticate ([#5105](https://www.github.com/googleapis/google-cloud-go/issues/5105)) ([46489f4](https://www.github.com/googleapis/google-cloud-go/commit/46489f4c8a634068a3e7cf2fd5e5ca11b555c0a8))
|
|
||||||
* **storage:** post policy can be signed with a fn that takes raw bytes ([#5079](https://www.github.com/googleapis/google-cloud-go/issues/5079)) ([25d1278](https://www.github.com/googleapis/google-cloud-go/commit/25d1278cab539fbfdd8563ed6b297e30d3fe555c))
|
|
||||||
* **storage:** add rpo (turbo replication) support ([#5003](https://www.github.com/googleapis/google-cloud-go/issues/5003)) ([3bd5995](https://www.github.com/googleapis/google-cloud-go/commit/3bd59958e0c06d2655b67fcb5410668db3c52af0))
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** fix nil check in gRPC Reader ([#5376](https://www.github.com/googleapis/google-cloud-go/issues/5376)) ([5e7d722](https://www.github.com/googleapis/google-cloud-go/commit/5e7d722d18a62b28ba98169b3bdbb49401377264))
|
|
||||||
|
|
||||||
### [1.18.2](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.1...storage/v1.18.2) (2021-10-18)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** upgrade genproto ([#4993](https://www.github.com/googleapis/google-cloud-go/issues/4993)) ([5ca462d](https://www.github.com/googleapis/google-cloud-go/commit/5ca462d99fe851b7cddfd70108798e2fa959bdfd)), refs [#4991](https://www.github.com/googleapis/google-cloud-go/issues/4991)
|
|
||||||
|
|
||||||
### [1.18.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.18.0...storage/v1.18.1) (2021-10-14)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** don't assume auth from a client option ([#4982](https://www.github.com/googleapis/google-cloud-go/issues/4982)) ([e17334d](https://www.github.com/googleapis/google-cloud-go/commit/e17334d1fe7645d89d14ae7148313498b984dfbb))
|
|
||||||
|
|
||||||
## [1.18.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.17.0...storage/v1.18.0) (2021-10-11)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** returned wrapped error for timeouts ([#4802](https://www.github.com/googleapis/google-cloud-go/issues/4802)) ([0e102a3](https://www.github.com/googleapis/google-cloud-go/commit/0e102a385dc67a06f6b444b3a93e6998428529be)), refs [#4197](https://www.github.com/googleapis/google-cloud-go/issues/4197)
|
|
||||||
* **storage:** SignedUrl can use existing creds to authenticate ([#4604](https://www.github.com/googleapis/google-cloud-go/issues/4604)) ([b824c89](https://www.github.com/googleapis/google-cloud-go/commit/b824c897e6941270747b612f6d36a8d6ae081315))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** update PAP to use inherited instead of unspecified ([#4909](https://www.github.com/googleapis/google-cloud-go/issues/4909)) ([dac26b1](https://www.github.com/googleapis/google-cloud-go/commit/dac26b1af2f2972f12775341173bcc5f982438b8))
|
|
||||||
|
|
||||||
## [1.17.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.1...storage/v1.17.0) (2021-09-28)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** add projectNumber field to bucketAttrs. ([#4805](https://www.github.com/googleapis/google-cloud-go/issues/4805)) ([07343af](https://www.github.com/googleapis/google-cloud-go/commit/07343afc15085b164cc41d202d13f9d46f5c0d02))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** align retry idempotency (part 1) ([#4715](https://www.github.com/googleapis/google-cloud-go/issues/4715)) ([ffa903e](https://www.github.com/googleapis/google-cloud-go/commit/ffa903eeec61aa3869e5220e2f09371127b5c393))
|
|
||||||
|
|
||||||
### [1.16.1](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.16.0...storage/v1.16.1) (2021-08-30)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage/internal:** Update encryption_key fields to "bytes" type. fix: Improve date/times and field name clarity in lifecycle conditions. ([a52baa4](https://www.github.com/googleapis/google-cloud-go/commit/a52baa456ed8513ec492c4b573c191eb61468758))
|
|
||||||
* **storage:** accept emulator env var without scheme ([#4616](https://www.github.com/googleapis/google-cloud-go/issues/4616)) ([5f8cbb9](https://www.github.com/googleapis/google-cloud-go/commit/5f8cbb98070109e2a34409ac775ed63b94d37efd))
|
|
||||||
* **storage:** preserve supplied endpoint's scheme ([#4609](https://www.github.com/googleapis/google-cloud-go/issues/4609)) ([ee2756f](https://www.github.com/googleapis/google-cloud-go/commit/ee2756fb0a335d591464a770c9fa4f8fe0ba2e01))
|
|
||||||
* **storage:** remove unnecessary variable ([#4608](https://www.github.com/googleapis/google-cloud-go/issues/4608)) ([27fc784](https://www.github.com/googleapis/google-cloud-go/commit/27fc78456fb251652bdf5cdb493734a7e1e643e1))
|
|
||||||
* **storage:** retry LockRetentionPolicy ([#4439](https://www.github.com/googleapis/google-cloud-go/issues/4439)) ([09879ea](https://www.github.com/googleapis/google-cloud-go/commit/09879ea80cb67f9bfd8fc9384b0fda335567cba9)), refs [#4437](https://www.github.com/googleapis/google-cloud-go/issues/4437)
|
|
||||||
* **storage:** revise Reader to send XML preconditions ([#4479](https://www.github.com/googleapis/google-cloud-go/issues/4479)) ([e36b29a](https://www.github.com/googleapis/google-cloud-go/commit/e36b29a3d43bce5c1c044f7daf6e1db00b0a49e0)), refs [#4470](https://www.github.com/googleapis/google-cloud-go/issues/4470)
|
|
||||||
|
|
||||||
## [1.16.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.15.0...storage/v1.16.0) (2021-06-28)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** support PublicAccessPrevention ([#3608](https://www.github.com/googleapis/google-cloud-go/issues/3608)) ([99bc782](https://www.github.com/googleapis/google-cloud-go/commit/99bc782fb50a47602b45278384ef5d5b5da9263b)), refs [#3203](https://www.github.com/googleapis/google-cloud-go/issues/3203)
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** fix Writer.ChunkSize validation ([#4255](https://www.github.com/googleapis/google-cloud-go/issues/4255)) ([69c2e9d](https://www.github.com/googleapis/google-cloud-go/commit/69c2e9dc6303e1a004d3104a8178532fa738e742)), refs [#4167](https://www.github.com/googleapis/google-cloud-go/issues/4167)
|
|
||||||
* **storage:** try to reopen for failed Reads ([#4226](https://www.github.com/googleapis/google-cloud-go/issues/4226)) ([564102b](https://www.github.com/googleapis/google-cloud-go/commit/564102b335dbfb558bec8af883e5f898efb5dd10)), refs [#3040](https://www.github.com/googleapis/google-cloud-go/issues/3040)
|
|
||||||
|
|
||||||
## [1.15.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.13.0...storage/v1.15.0) (2021-04-21)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **transport** Bump dependency on google.golang.org/api to pick up HTTP/2
|
|
||||||
config updates (see [googleapis/google-api-go-client#882](https://github.com/googleapis/google-api-go-client/pull/882)).
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** retry io.ErrUnexpectedEOF ([#3957](https://www.github.com/googleapis/google-cloud-go/issues/3957)) ([f6590cd](https://www.github.com/googleapis/google-cloud-go/commit/f6590cdc26c8479be5df48949fa59f879e0c24fc))
|
|
||||||
|
|
||||||
|
|
||||||
## v1.14.0
|
|
||||||
|
|
||||||
- Updates to various dependencies.
|
|
||||||
|
|
||||||
## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03)
|
|
||||||
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0))
|
|
||||||
* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b))
|
|
||||||
|
|
||||||
|
|
||||||
### Bug Fixes
|
|
||||||
|
|
||||||
* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537))
|
|
||||||
|
|
||||||
## v1.12.0
|
|
||||||
- V4 signed URL fixes:
|
|
||||||
- Fix encoding of spaces in query parameters.
|
|
||||||
- Add fields that were missing from PostPolicyV4 policy conditions.
|
|
||||||
- Fix Query to correctly list prefixes as well as objects when SetAttrSelection
|
|
||||||
is used.
|
|
||||||
|
|
||||||
## v1.11.0
|
|
||||||
- Add support for CustomTime and NoncurrentTime object lifecycle management
|
|
||||||
features.
|
|
||||||
|
|
||||||
## v1.10.0
|
|
||||||
- Bump dependency on google.golang.org/api to capture changes to retry logic
|
|
||||||
which will make retries on writes more resilient.
|
|
||||||
- Improve documentation for Writer.ChunkSize.
|
|
||||||
- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket.
|
|
||||||
|
|
||||||
## v1.9.0
|
|
||||||
- Add retry for transient network errors on most operations (with the exception
|
|
||||||
of writes).
|
|
||||||
- Bump dependency for google.golang.org/api to capture a change in the default
|
|
||||||
HTTP transport which will improve performance for reads under heavy load.
|
|
||||||
- Add CRC32C checksum validation option to Composer.
|
|
||||||
|
|
||||||
## v1.8.0
|
|
||||||
- Add support for V4 signed post policies.
|
|
||||||
|
|
||||||
## v1.7.0
|
|
||||||
- V4 signed URL support:
|
|
||||||
- Add support for bucket-bound domains and virtual hosted style URLs.
|
|
||||||
- Add support for query parameters in the signature.
|
|
||||||
- Fix text encoding to align with standards.
|
|
||||||
- Add the object name to query parameters for write calls.
|
|
||||||
- Fix retry behavior when reading files with Content-Encoding gzip.
|
|
||||||
- Fix response header in reader.
|
|
||||||
- New code examples:
|
|
||||||
- Error handling for `ObjectHandle` preconditions.
|
|
||||||
- Existence checks for buckets and objects.
|
|
||||||
|
|
||||||
## v1.6.0
|
|
||||||
|
|
||||||
- Updated option handling:
|
|
||||||
- Don't drop custom scopes (#1756)
|
|
||||||
- Don't drop port in provided endpoint (#1737)
|
|
||||||
|
|
||||||
## v1.5.0
|
|
||||||
|
|
||||||
- Honor WithEndpoint client option for reads as well as writes.
|
|
||||||
- Add archive storage class to docs.
|
|
||||||
- Make fixes to storage benchwrapper.
|
|
||||||
|
|
||||||
## v1.4.0
|
|
||||||
|
|
||||||
- When listing objects in a bucket, allow callers to specify which attributes
|
|
||||||
are queried. This allows for performance optimization.
|
|
||||||
|
|
||||||
## v1.3.0
|
|
||||||
|
|
||||||
- Use `storage.googleapis.com/storage/v1` by default for GCS requests
|
|
||||||
instead of `www.googleapis.com/storage/v1`.
|
|
||||||
|
|
||||||
## v1.2.1
|
|
||||||
|
|
||||||
- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not
|
|
||||||
being sent in all cases.
|
|
||||||
|
|
||||||
## v1.2.0
|
|
||||||
|
|
||||||
- Add support for UniformBucketLevelAccess. This configures access checks
|
|
||||||
to use only bucket-level IAM policies.
|
|
||||||
See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess.
|
|
||||||
- Fix userAgent to use correct version.
|
|
||||||
|
|
||||||
## v1.1.2
|
|
||||||
|
|
||||||
- Fix memory leak in BucketIterator and ObjectIterator.
|
|
||||||
|
|
||||||
## v1.1.1
|
|
||||||
|
|
||||||
- Send BucketPolicyOnly even when it's disabled.
|
|
||||||
|
|
||||||
## v1.1.0
|
|
||||||
|
|
||||||
- Performance improvements for ObjectIterator and BucketIterator.
|
|
||||||
- Fix Bucket.ObjectIterator size calculation checks.
|
|
||||||
- Added HMACKeyOptions to all the methods which allows for options such as
|
|
||||||
UserProject to be set per invocation and optionally be used.
|
|
||||||
|
|
||||||
## v1.0.0
|
|
||||||
|
|
||||||
This is the first tag to carve out storage as its own module. See:
|
|
||||||
https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository.
|
|
||||||
202
vendor/cloud.google.com/go/storage/LICENSE
generated
vendored
202
vendor/cloud.google.com/go/storage/LICENSE
generated
vendored
|
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
32
vendor/cloud.google.com/go/storage/README.md
generated
vendored
32
vendor/cloud.google.com/go/storage/README.md
generated
vendored
|
|
@ -1,32 +0,0 @@
|
||||||
## Cloud Storage [](https://pkg.go.dev/cloud.google.com/go/storage)
|
|
||||||
|
|
||||||
- [About Cloud Storage](https://cloud.google.com/storage/)
|
|
||||||
- [API documentation](https://cloud.google.com/storage/docs)
|
|
||||||
- [Go client documentation](https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest)
|
|
||||||
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/storage)
|
|
||||||
|
|
||||||
### Example Usage
|
|
||||||
|
|
||||||
First create a `storage.Client` to use throughout your application:
|
|
||||||
|
|
||||||
[snip]:# (storage-1)
|
|
||||||
```go
|
|
||||||
client, err := storage.NewClient(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
[snip]:# (storage-2)
|
|
||||||
```go
|
|
||||||
// Read the object1 from bucket.
|
|
||||||
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer rc.Close()
|
|
||||||
body, err := ioutil.ReadAll(rc)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
356
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
356
vendor/cloud.google.com/go/storage/acl.go
generated
vendored
|
|
@ -1,356 +0,0 @@
|
||||||
// Copyright 2014 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/http"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"cloud.google.com/go/internal/trace"
|
|
||||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ACLRole is the level of access to grant.
|
|
||||||
type ACLRole string
|
|
||||||
|
|
||||||
const (
|
|
||||||
RoleOwner ACLRole = "OWNER"
|
|
||||||
RoleReader ACLRole = "READER"
|
|
||||||
RoleWriter ACLRole = "WRITER"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ACLEntity refers to a user or group.
|
|
||||||
// They are sometimes referred to as grantees.
|
|
||||||
//
|
|
||||||
// It could be in the form of:
|
|
||||||
// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
|
|
||||||
// "domain-<domain>" and "project-team-<projectId>".
|
|
||||||
//
|
|
||||||
// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
|
|
||||||
type ACLEntity string
|
|
||||||
|
|
||||||
const (
|
|
||||||
AllUsers ACLEntity = "allUsers"
|
|
||||||
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ACLRule represents a grant for a role to an entity (user, group or team) for a
|
|
||||||
// Google Cloud Storage object or bucket.
|
|
||||||
type ACLRule struct {
|
|
||||||
Entity ACLEntity
|
|
||||||
EntityID string
|
|
||||||
Role ACLRole
|
|
||||||
Domain string
|
|
||||||
Email string
|
|
||||||
ProjectTeam *ProjectTeam
|
|
||||||
}
|
|
||||||
|
|
||||||
// ProjectTeam is the project team associated with the entity, if any.
|
|
||||||
type ProjectTeam struct {
|
|
||||||
ProjectNumber string
|
|
||||||
Team string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
|
|
||||||
// ACLHandle on an object operates on the latest generation of that object by default.
|
|
||||||
// Selecting a specific generation of an object is not currently supported by the client.
|
|
||||||
type ACLHandle struct {
|
|
||||||
c *Client
|
|
||||||
bucket string
|
|
||||||
object string
|
|
||||||
isDefault bool
|
|
||||||
userProject string // for requester-pays buckets
|
|
||||||
retry *retryConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete permanently deletes the ACL entry for the given entity.
|
|
||||||
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
if a.object != "" {
|
|
||||||
return a.objectDelete(ctx, entity)
|
|
||||||
}
|
|
||||||
if a.isDefault {
|
|
||||||
return a.bucketDefaultDelete(ctx, entity)
|
|
||||||
}
|
|
||||||
return a.bucketDelete(ctx, entity)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the role for the given entity.
|
|
||||||
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
if a.object != "" {
|
|
||||||
return a.objectSet(ctx, entity, role, false)
|
|
||||||
}
|
|
||||||
if a.isDefault {
|
|
||||||
return a.objectSet(ctx, entity, role, true)
|
|
||||||
}
|
|
||||||
return a.bucketSet(ctx, entity, role)
|
|
||||||
}
|
|
||||||
|
|
||||||
// List retrieves ACL entries.
|
|
||||||
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
if a.object != "" {
|
|
||||||
return a.objectList(ctx)
|
|
||||||
}
|
|
||||||
if a.isDefault {
|
|
||||||
return a.bucketDefaultList(ctx)
|
|
||||||
}
|
|
||||||
return a.bucketList(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
|
|
||||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
|
||||||
return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
|
|
||||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
|
||||||
return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
|
|
||||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
|
||||||
return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
|
|
||||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
|
||||||
return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
|
|
||||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
|
||||||
return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
|
|
||||||
opts := makeStorageOpts(true, a.retry, a.userProject)
|
|
||||||
return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
|
|
||||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
|
||||||
if isBucketDefault {
|
|
||||||
return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...)
|
|
||||||
}
|
|
||||||
return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
|
|
||||||
opts := makeStorageOpts(false, a.retry, a.userProject)
|
|
||||||
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
|
|
||||||
vc := reflect.ValueOf(call)
|
|
||||||
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
|
|
||||||
if a.userProject != "" {
|
|
||||||
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
|
|
||||||
}
|
|
||||||
setClientHeader(call.Header())
|
|
||||||
}
|
|
||||||
|
|
||||||
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
|
|
||||||
var rs []ACLRule
|
|
||||||
for _, item := range items {
|
|
||||||
rs = append(rs, toObjectACLRule(item))
|
|
||||||
}
|
|
||||||
return rs
|
|
||||||
}
|
|
||||||
|
|
||||||
func toObjectACLRulesFromProto(items []*storagepb.ObjectAccessControl) []ACLRule {
|
|
||||||
var rs []ACLRule
|
|
||||||
for _, item := range items {
|
|
||||||
rs = append(rs, toObjectACLRuleFromProto(item))
|
|
||||||
}
|
|
||||||
return rs
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
|
|
||||||
var rs []ACLRule
|
|
||||||
for _, item := range items {
|
|
||||||
rs = append(rs, toBucketACLRule(item))
|
|
||||||
}
|
|
||||||
return rs
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBucketACLRulesFromProto(items []*storagepb.BucketAccessControl) []ACLRule {
|
|
||||||
var rs []ACLRule
|
|
||||||
for _, item := range items {
|
|
||||||
rs = append(rs, toBucketACLRuleFromProto(item))
|
|
||||||
}
|
|
||||||
return rs
|
|
||||||
}
|
|
||||||
|
|
||||||
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
|
|
||||||
return ACLRule{
|
|
||||||
Entity: ACLEntity(a.Entity),
|
|
||||||
EntityID: a.EntityId,
|
|
||||||
Role: ACLRole(a.Role),
|
|
||||||
Domain: a.Domain,
|
|
||||||
Email: a.Email,
|
|
||||||
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func toObjectACLRuleFromProto(a *storagepb.ObjectAccessControl) ACLRule {
|
|
||||||
return ACLRule{
|
|
||||||
Entity: ACLEntity(a.GetEntity()),
|
|
||||||
EntityID: a.GetEntityId(),
|
|
||||||
Role: ACLRole(a.GetRole()),
|
|
||||||
Domain: a.GetDomain(),
|
|
||||||
Email: a.GetEmail(),
|
|
||||||
ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
|
|
||||||
return ACLRule{
|
|
||||||
Entity: ACLEntity(a.Entity),
|
|
||||||
EntityID: a.EntityId,
|
|
||||||
Role: ACLRole(a.Role),
|
|
||||||
Domain: a.Domain,
|
|
||||||
Email: a.Email,
|
|
||||||
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBucketACLRuleFromProto(a *storagepb.BucketAccessControl) ACLRule {
|
|
||||||
return ACLRule{
|
|
||||||
Entity: ACLEntity(a.GetEntity()),
|
|
||||||
EntityID: a.GetEntityId(),
|
|
||||||
Role: ACLRole(a.GetRole()),
|
|
||||||
Domain: a.GetDomain(),
|
|
||||||
Email: a.GetEmail(),
|
|
||||||
ProjectTeam: toProjectTeamFromProto(a.GetProjectTeam()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
|
|
||||||
if len(rules) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
r := make([]*raw.ObjectAccessControl, 0, len(rules))
|
|
||||||
for _, rule := range rules {
|
|
||||||
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func toProtoObjectACL(rules []ACLRule) []*storagepb.ObjectAccessControl {
|
|
||||||
if len(rules) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
r := make([]*storagepb.ObjectAccessControl, 0, len(rules))
|
|
||||||
for _, rule := range rules {
|
|
||||||
r = append(r, rule.toProtoObjectAccessControl("")) // bucket name unnecessary
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
|
|
||||||
if len(rules) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
r := make([]*raw.BucketAccessControl, 0, len(rules))
|
|
||||||
for _, rule := range rules {
|
|
||||||
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func toProtoBucketACL(rules []ACLRule) []*storagepb.BucketAccessControl {
|
|
||||||
if len(rules) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
r := make([]*storagepb.BucketAccessControl, 0, len(rules))
|
|
||||||
for _, rule := range rules {
|
|
||||||
r = append(r, rule.toProtoBucketAccessControl())
|
|
||||||
}
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
|
|
||||||
return &raw.BucketAccessControl{
|
|
||||||
Bucket: bucket,
|
|
||||||
Entity: string(r.Entity),
|
|
||||||
Role: string(r.Role),
|
|
||||||
// The other fields are not settable.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
|
|
||||||
return &raw.ObjectAccessControl{
|
|
||||||
Bucket: bucket,
|
|
||||||
Entity: string(r.Entity),
|
|
||||||
Role: string(r.Role),
|
|
||||||
// The other fields are not settable.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r ACLRule) toProtoObjectAccessControl(bucket string) *storagepb.ObjectAccessControl {
|
|
||||||
return &storagepb.ObjectAccessControl{
|
|
||||||
Entity: string(r.Entity),
|
|
||||||
Role: string(r.Role),
|
|
||||||
// The other fields are not settable.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r ACLRule) toProtoBucketAccessControl() *storagepb.BucketAccessControl {
|
|
||||||
return &storagepb.BucketAccessControl{
|
|
||||||
Entity: string(r.Entity),
|
|
||||||
Role: string(r.Role),
|
|
||||||
// The other fields are not settable.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
|
|
||||||
if p == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &ProjectTeam{
|
|
||||||
ProjectNumber: p.ProjectNumber,
|
|
||||||
Team: p.Team,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func toProjectTeamFromProto(p *storagepb.ProjectTeam) *ProjectTeam {
|
|
||||||
if p == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &ProjectTeam{
|
|
||||||
ProjectNumber: p.GetProjectNumber(),
|
|
||||||
Team: p.GetTeam(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
|
|
||||||
if p == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &ProjectTeam{
|
|
||||||
ProjectNumber: p.ProjectNumber,
|
|
||||||
Team: p.Team,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
2070
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
2070
vendor/cloud.google.com/go/storage/bucket.go
generated
vendored
File diff suppressed because it is too large
Load diff
332
vendor/cloud.google.com/go/storage/client.go
generated
vendored
332
vendor/cloud.google.com/go/storage/client.go
generated
vendored
|
|
@ -1,332 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
gax "github.com/googleapis/gax-go/v2"
|
|
||||||
"google.golang.org/api/option"
|
|
||||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO(noahdietz): Move existing factory methods to this file.
|
|
||||||
|
|
||||||
// storageClient is an internal-only interface designed to separate the
|
|
||||||
// transport-specific logic of making Storage API calls from the logic of the
|
|
||||||
// client library.
|
|
||||||
//
|
|
||||||
// Implementation requirements beyond implementing the interface include:
|
|
||||||
// * factory method(s) must accept a `userProject string` param
|
|
||||||
// * `settings` must be retained per instance
|
|
||||||
// * `storageOption`s must be resolved in the order they are received
|
|
||||||
// * all API errors must be wrapped in the gax-go APIError type
|
|
||||||
// * any unimplemented interface methods must return a StorageUnimplementedErr
|
|
||||||
//
|
|
||||||
// TODO(noahdietz): This interface is currently not used in the production code
|
|
||||||
// paths
|
|
||||||
type storageClient interface {
|
|
||||||
|
|
||||||
// Top-level methods.
|
|
||||||
|
|
||||||
GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
|
|
||||||
CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
|
|
||||||
ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
|
|
||||||
Close() error
|
|
||||||
|
|
||||||
// Bucket methods.
|
|
||||||
|
|
||||||
DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
|
|
||||||
GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
|
|
||||||
UpdateBucket(ctx context.Context, bucket string, uattrs *BucketAttrsToUpdate, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error)
|
|
||||||
LockBucketRetentionPolicy(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error
|
|
||||||
ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator
|
|
||||||
|
|
||||||
// Object metadata methods.
|
|
||||||
|
|
||||||
DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
|
|
||||||
GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
|
|
||||||
UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
|
|
||||||
|
|
||||||
// Default Object ACL methods.
|
|
||||||
|
|
||||||
DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
|
||||||
ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
|
||||||
UpdateDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
|
||||||
|
|
||||||
// Bucket ACL methods.
|
|
||||||
|
|
||||||
DeleteBucketACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error
|
|
||||||
ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error)
|
|
||||||
UpdateBucketACL(ctx context.Context, bucket string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
|
||||||
|
|
||||||
// Object ACL methods.
|
|
||||||
|
|
||||||
DeleteObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, opts ...storageOption) error
|
|
||||||
ListObjectACLs(ctx context.Context, bucket, object string, opts ...storageOption) ([]ACLRule, error)
|
|
||||||
UpdateObjectACL(ctx context.Context, bucket, object string, entity ACLEntity, role ACLRole, opts ...storageOption) error
|
|
||||||
|
|
||||||
// Media operations.
|
|
||||||
|
|
||||||
ComposeObject(ctx context.Context, req *composeObjectRequest, opts ...storageOption) (*ObjectAttrs, error)
|
|
||||||
RewriteObject(ctx context.Context, req *rewriteObjectRequest, opts ...storageOption) (*rewriteObjectResponse, error)
|
|
||||||
|
|
||||||
NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (*Reader, error)
|
|
||||||
OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error)
|
|
||||||
|
|
||||||
// IAM methods.
|
|
||||||
|
|
||||||
GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error)
|
|
||||||
SetIamPolicy(ctx context.Context, resource string, policy *iampb.Policy, opts ...storageOption) error
|
|
||||||
TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error)
|
|
||||||
|
|
||||||
// HMAC Key methods.
|
|
||||||
|
|
||||||
GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error)
|
|
||||||
ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator
|
|
||||||
UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error)
|
|
||||||
CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error)
|
|
||||||
DeleteHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) error
|
|
||||||
|
|
||||||
// Notification methods.
|
|
||||||
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
|
|
||||||
CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
|
|
||||||
DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// settings contains transport-agnostic configuration for API calls made via
|
|
||||||
// the storageClient inteface. All implementations must utilize settings
|
|
||||||
// and respect those that are applicable.
|
|
||||||
type settings struct {
|
|
||||||
// retry is the complete retry configuration to use when evaluating if an
|
|
||||||
// API call should be retried.
|
|
||||||
retry *retryConfig
|
|
||||||
|
|
||||||
// gax is a set of gax.CallOption to be conveyed to gax.Invoke.
|
|
||||||
// Note: Not all storageClient interfaces will must use gax.Invoke.
|
|
||||||
gax []gax.CallOption
|
|
||||||
|
|
||||||
// idempotent indicates if the call is idempotent or not when considering
|
|
||||||
// if the call should be retired or not.
|
|
||||||
idempotent bool
|
|
||||||
|
|
||||||
// clientOption is a set of option.ClientOption to be used during client
|
|
||||||
// transport initialization. See https://pkg.go.dev/google.golang.org/api/option
|
|
||||||
// for a list of supported options.
|
|
||||||
clientOption []option.ClientOption
|
|
||||||
|
|
||||||
// userProject is the user project that should be billed for the request.
|
|
||||||
userProject string
|
|
||||||
}
|
|
||||||
|
|
||||||
func initSettings(opts ...storageOption) *settings {
|
|
||||||
s := &settings{}
|
|
||||||
resolveOptions(s, opts...)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolveOptions(s *settings, opts ...storageOption) {
|
|
||||||
for _, o := range opts {
|
|
||||||
o.Apply(s)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// callSettings is a helper for resolving storage options against the settings
|
|
||||||
// in the context of an individual call. This is to ensure that client-level
|
|
||||||
// default settings are not mutated by two different calls getting options.
|
|
||||||
//
|
|
||||||
// Example: s := callSettings(c.settings, opts...)
|
|
||||||
func callSettings(defaults *settings, opts ...storageOption) *settings {
|
|
||||||
if defaults == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// This does not make a deep copy of the pointer/slice fields, but all
|
|
||||||
// options replace the settings fields rather than modify their values in
|
|
||||||
// place.
|
|
||||||
cs := *defaults
|
|
||||||
resolveOptions(&cs, opts...)
|
|
||||||
return &cs
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeStorageOpts is a helper for generating a set of storageOption based on
|
|
||||||
// idempotency, retryConfig, and userProject. All top-level client operations
|
|
||||||
// will generally have to pass these options through the interface.
|
|
||||||
func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption {
|
|
||||||
opts := []storageOption{idempotent(isIdempotent)}
|
|
||||||
if retry != nil {
|
|
||||||
opts = append(opts, withRetryConfig(retry))
|
|
||||||
}
|
|
||||||
if userProject != "" {
|
|
||||||
opts = append(opts, withUserProject(userProject))
|
|
||||||
}
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
|
|
||||||
// storageOption is the transport-agnostic call option for the storageClient
|
|
||||||
// interface.
|
|
||||||
type storageOption interface {
|
|
||||||
Apply(s *settings)
|
|
||||||
}
|
|
||||||
|
|
||||||
func withGAXOptions(opts ...gax.CallOption) storageOption {
|
|
||||||
return &gaxOption{opts}
|
|
||||||
}
|
|
||||||
|
|
||||||
type gaxOption struct {
|
|
||||||
opts []gax.CallOption
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *gaxOption) Apply(s *settings) { s.gax = o.opts }
|
|
||||||
|
|
||||||
func withRetryConfig(rc *retryConfig) storageOption {
|
|
||||||
return &retryOption{rc}
|
|
||||||
}
|
|
||||||
|
|
||||||
type retryOption struct {
|
|
||||||
rc *retryConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *retryOption) Apply(s *settings) { s.retry = o.rc }
|
|
||||||
|
|
||||||
func idempotent(i bool) storageOption {
|
|
||||||
return &idempotentOption{i}
|
|
||||||
}
|
|
||||||
|
|
||||||
type idempotentOption struct {
|
|
||||||
idempotency bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *idempotentOption) Apply(s *settings) { s.idempotent = o.idempotency }
|
|
||||||
|
|
||||||
func withClientOptions(opts ...option.ClientOption) storageOption {
|
|
||||||
return &clientOption{opts: opts}
|
|
||||||
}
|
|
||||||
|
|
||||||
type clientOption struct {
|
|
||||||
opts []option.ClientOption
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *clientOption) Apply(s *settings) { s.clientOption = o.opts }
|
|
||||||
|
|
||||||
func withUserProject(project string) storageOption {
|
|
||||||
return &userProjectOption{project}
|
|
||||||
}
|
|
||||||
|
|
||||||
type userProjectOption struct {
|
|
||||||
project string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *userProjectOption) Apply(s *settings) { s.userProject = o.project }
|
|
||||||
|
|
||||||
type openWriterParams struct {
|
|
||||||
// Writer configuration
|
|
||||||
|
|
||||||
// ctx is the context used by the writer routine to make all network calls
|
|
||||||
// and to manage the writer routine - see `Writer.ctx`.
|
|
||||||
// Required.
|
|
||||||
ctx context.Context
|
|
||||||
// chunkSize - see `Writer.ChunkSize`.
|
|
||||||
// Optional.
|
|
||||||
chunkSize int
|
|
||||||
// chunkRetryDeadline - see `Writer.ChunkRetryDeadline`.
|
|
||||||
// Optional.
|
|
||||||
chunkRetryDeadline time.Duration
|
|
||||||
|
|
||||||
// Object/request properties
|
|
||||||
|
|
||||||
// bucket - see `Writer.o.bucket`.
|
|
||||||
// Required.
|
|
||||||
bucket string
|
|
||||||
// attrs - see `Writer.ObjectAttrs`.
|
|
||||||
// Required.
|
|
||||||
attrs *ObjectAttrs
|
|
||||||
// conds - see `Writer.o.conds`.
|
|
||||||
// Optional.
|
|
||||||
conds *Conditions
|
|
||||||
// encryptionKey - see `Writer.o.encryptionKey`
|
|
||||||
// Optional.
|
|
||||||
encryptionKey []byte
|
|
||||||
// sendCRC32C - see `Writer.SendCRC32C`.
|
|
||||||
// Optional.
|
|
||||||
sendCRC32C bool
|
|
||||||
|
|
||||||
// Writer callbacks
|
|
||||||
|
|
||||||
// donec - see `Writer.donec`.
|
|
||||||
// Required.
|
|
||||||
donec chan struct{}
|
|
||||||
// setError callback for reporting errors - see `Writer.error`.
|
|
||||||
// Required.
|
|
||||||
setError func(error)
|
|
||||||
// progress callback for reporting upload progress - see `Writer.progress`.
|
|
||||||
// Required.
|
|
||||||
progress func(int64)
|
|
||||||
// setObj callback for reporting the resulting object - see `Writer.obj`.
|
|
||||||
// Required.
|
|
||||||
setObj func(*ObjectAttrs)
|
|
||||||
}
|
|
||||||
|
|
||||||
type newRangeReaderParams struct {
|
|
||||||
bucket string
|
|
||||||
conds *Conditions
|
|
||||||
encryptionKey []byte
|
|
||||||
gen int64
|
|
||||||
length int64
|
|
||||||
object string
|
|
||||||
offset int64
|
|
||||||
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
|
|
||||||
}
|
|
||||||
|
|
||||||
type composeObjectRequest struct {
|
|
||||||
dstBucket string
|
|
||||||
dstObject destinationObject
|
|
||||||
srcs []sourceObject
|
|
||||||
predefinedACL string
|
|
||||||
sendCRC32C bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type sourceObject struct {
|
|
||||||
name string
|
|
||||||
bucket string
|
|
||||||
gen int64
|
|
||||||
conds *Conditions
|
|
||||||
encryptionKey []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
type destinationObject struct {
|
|
||||||
name string
|
|
||||||
bucket string
|
|
||||||
conds *Conditions
|
|
||||||
attrs *ObjectAttrs // attrs to set on the destination object.
|
|
||||||
encryptionKey []byte
|
|
||||||
keyName string
|
|
||||||
}
|
|
||||||
|
|
||||||
type rewriteObjectRequest struct {
|
|
||||||
srcObject sourceObject
|
|
||||||
dstObject destinationObject
|
|
||||||
predefinedACL string
|
|
||||||
token string
|
|
||||||
}
|
|
||||||
|
|
||||||
type rewriteObjectResponse struct {
|
|
||||||
resource *ObjectAttrs
|
|
||||||
done bool
|
|
||||||
written int64
|
|
||||||
size int64
|
|
||||||
token string
|
|
||||||
}
|
|
||||||
222
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
222
vendor/cloud.google.com/go/storage/copy.go
generated
vendored
|
|
@ -1,222 +0,0 @@
|
||||||
// Copyright 2016 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"cloud.google.com/go/internal/trace"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CopierFrom creates a Copier that can copy src to dst.
|
|
||||||
// You can immediately call Run on the returned Copier, or
|
|
||||||
// you can configure it first.
|
|
||||||
//
|
|
||||||
// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
|
|
||||||
// in which case the user project of src is billed.
|
|
||||||
func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
|
|
||||||
return &Copier{dst: dst, src: src}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Copier copies a source object to a destination.
|
|
||||||
type Copier struct {
|
|
||||||
// ObjectAttrs are optional attributes to set on the destination object.
|
|
||||||
// Any attributes must be initialized before any calls on the Copier. Nil
|
|
||||||
// or zero-valued attributes are ignored.
|
|
||||||
ObjectAttrs
|
|
||||||
|
|
||||||
// RewriteToken can be set before calling Run to resume a copy
|
|
||||||
// operation. After Run returns a non-nil error, RewriteToken will
|
|
||||||
// have been updated to contain the value needed to resume the copy.
|
|
||||||
RewriteToken string
|
|
||||||
|
|
||||||
// ProgressFunc can be used to monitor the progress of a multi-RPC copy
|
|
||||||
// operation. If ProgressFunc is not nil and copying requires multiple
|
|
||||||
// calls to the underlying service (see
|
|
||||||
// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
|
|
||||||
// ProgressFunc will be invoked after each call with the number of bytes of
|
|
||||||
// content copied so far and the total size in bytes of the source object.
|
|
||||||
//
|
|
||||||
// ProgressFunc is intended to make upload progress available to the
|
|
||||||
// application. For example, the implementation of ProgressFunc may update
|
|
||||||
// a progress bar in the application's UI, or log the result of
|
|
||||||
// float64(copiedBytes)/float64(totalBytes).
|
|
||||||
//
|
|
||||||
// ProgressFunc should return quickly without blocking.
|
|
||||||
ProgressFunc func(copiedBytes, totalBytes uint64)
|
|
||||||
|
|
||||||
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
|
|
||||||
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
|
|
||||||
// any.
|
|
||||||
//
|
|
||||||
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
|
|
||||||
// (via ObjectHandle.Key) on the destination object will result in an error when
|
|
||||||
// Run is called.
|
|
||||||
DestinationKMSKeyName string
|
|
||||||
|
|
||||||
dst, src *ObjectHandle
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run performs the copy.
|
|
||||||
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
if err := c.src.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := c.dst.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
|
|
||||||
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
|
|
||||||
}
|
|
||||||
if c.dst.gen != defaultGen {
|
|
||||||
return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen)
|
|
||||||
}
|
|
||||||
// Convert destination attributes to raw form, omitting the bucket.
|
|
||||||
// If the bucket is included but name or content-type aren't, the service
|
|
||||||
// returns a 400 with "Required" as the only message. Omitting the bucket
|
|
||||||
// does not cause any problems.
|
|
||||||
req := &rewriteObjectRequest{
|
|
||||||
srcObject: sourceObject{
|
|
||||||
name: c.src.object,
|
|
||||||
bucket: c.src.bucket,
|
|
||||||
gen: c.src.gen,
|
|
||||||
conds: c.src.conds,
|
|
||||||
encryptionKey: c.src.encryptionKey,
|
|
||||||
},
|
|
||||||
dstObject: destinationObject{
|
|
||||||
name: c.dst.object,
|
|
||||||
bucket: c.dst.bucket,
|
|
||||||
conds: c.dst.conds,
|
|
||||||
attrs: &c.ObjectAttrs,
|
|
||||||
encryptionKey: c.dst.encryptionKey,
|
|
||||||
keyName: c.DestinationKMSKeyName,
|
|
||||||
},
|
|
||||||
predefinedACL: c.PredefinedACL,
|
|
||||||
token: c.RewriteToken,
|
|
||||||
}
|
|
||||||
|
|
||||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
|
||||||
var userProject string
|
|
||||||
if c.dst.userProject != "" {
|
|
||||||
userProject = c.dst.userProject
|
|
||||||
} else if c.src.userProject != "" {
|
|
||||||
userProject = c.src.userProject
|
|
||||||
}
|
|
||||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject)
|
|
||||||
|
|
||||||
for {
|
|
||||||
res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c.RewriteToken = res.token
|
|
||||||
if c.ProgressFunc != nil {
|
|
||||||
c.ProgressFunc(uint64(res.written), uint64(res.size))
|
|
||||||
}
|
|
||||||
if res.done { // Finished successfully.
|
|
||||||
return res.resource, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ComposerFrom creates a Composer that can compose srcs into dst.
|
|
||||||
// You can immediately call Run on the returned Composer, or you can
|
|
||||||
// configure it first.
|
|
||||||
//
|
|
||||||
// The encryption key for the destination object will be used to decrypt all
|
|
||||||
// source objects and encrypt the destination object. It is an error
|
|
||||||
// to specify an encryption key for any of the source objects.
|
|
||||||
func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
|
|
||||||
return &Composer{dst: dst, srcs: srcs}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Composer composes source objects into a destination object.
|
|
||||||
//
|
|
||||||
// For Requester Pays buckets, the user project of dst is billed.
|
|
||||||
type Composer struct {
|
|
||||||
// ObjectAttrs are optional attributes to set on the destination object.
|
|
||||||
// Any attributes must be initialized before any calls on the Composer. Nil
|
|
||||||
// or zero-valued attributes are ignored.
|
|
||||||
ObjectAttrs
|
|
||||||
|
|
||||||
// SendCRC specifies whether to transmit a CRC32C field. It should be set
|
|
||||||
// to true in addition to setting the Composer's CRC32C field, because zero
|
|
||||||
// is a valid CRC and normally a zero would not be transmitted.
|
|
||||||
// If a CRC32C is sent, and the data in the destination object does not match
|
|
||||||
// the checksum, the compose will be rejected.
|
|
||||||
SendCRC32C bool
|
|
||||||
|
|
||||||
dst *ObjectHandle
|
|
||||||
srcs []*ObjectHandle
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run performs the compose operation.
|
|
||||||
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
if err := c.dst.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if c.dst.gen != defaultGen {
|
|
||||||
return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen)
|
|
||||||
}
|
|
||||||
if len(c.srcs) == 0 {
|
|
||||||
return nil, errors.New("storage: at least one source object must be specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, src := range c.srcs {
|
|
||||||
if err := src.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if src.bucket != c.dst.bucket {
|
|
||||||
return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
|
|
||||||
}
|
|
||||||
if src.encryptionKey != nil {
|
|
||||||
return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
req := &composeObjectRequest{
|
|
||||||
dstBucket: c.dst.bucket,
|
|
||||||
predefinedACL: c.PredefinedACL,
|
|
||||||
sendCRC32C: c.SendCRC32C,
|
|
||||||
}
|
|
||||||
req.dstObject = destinationObject{
|
|
||||||
name: c.dst.object,
|
|
||||||
bucket: c.dst.bucket,
|
|
||||||
conds: c.dst.conds,
|
|
||||||
attrs: &c.ObjectAttrs,
|
|
||||||
encryptionKey: c.dst.encryptionKey,
|
|
||||||
}
|
|
||||||
for _, src := range c.srcs {
|
|
||||||
s := sourceObject{
|
|
||||||
name: src.object,
|
|
||||||
bucket: src.bucket,
|
|
||||||
gen: src.gen,
|
|
||||||
conds: src.conds,
|
|
||||||
}
|
|
||||||
req.srcs = append(req.srcs, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
|
|
||||||
opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject)
|
|
||||||
return c.dst.c.tc.ComposeObject(ctx, req, opts...)
|
|
||||||
}
|
|
||||||
328
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
328
vendor/cloud.google.com/go/storage/doc.go
generated
vendored
|
|
@ -1,328 +0,0 @@
|
||||||
// Copyright 2016 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package storage provides an easy way to work with Google Cloud Storage.
|
|
||||||
Google Cloud Storage stores data in named objects, which are grouped into buckets.
|
|
||||||
|
|
||||||
More information about Google Cloud Storage is available at
|
|
||||||
https://cloud.google.com/storage/docs.
|
|
||||||
|
|
||||||
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
|
|
||||||
connection pooling and similar aspects of this package.
|
|
||||||
|
|
||||||
# Creating a Client
|
|
||||||
|
|
||||||
To start working with this package, create a [Client]:
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
client, err := storage.NewClient(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
The client will use your default application credentials. Clients should be
|
|
||||||
reused instead of created as needed. The methods of [Client] are safe for
|
|
||||||
concurrent use by multiple goroutines.
|
|
||||||
|
|
||||||
If you only wish to access public data, you can create
|
|
||||||
an unauthenticated client with
|
|
||||||
|
|
||||||
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
|
|
||||||
|
|
||||||
To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
|
|
||||||
environment variable to the address at which your emulator is running. This will
|
|
||||||
send requests to that address instead of to Cloud Storage. You can then create
|
|
||||||
and use a client as usual:
|
|
||||||
|
|
||||||
// Set STORAGE_EMULATOR_HOST environment variable.
|
|
||||||
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create client as usual.
|
|
||||||
client, err := storage.NewClient(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
// This request is now directed to http://localhost:9000/storage/v1/b
|
|
||||||
// instead of https://storage.googleapis.com/storage/v1/b
|
|
||||||
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
Please note that there is no official emulator for Cloud Storage.
|
|
||||||
|
|
||||||
# Buckets
|
|
||||||
|
|
||||||
A Google Cloud Storage bucket is a collection of objects. To work with a
|
|
||||||
bucket, make a bucket handle:
|
|
||||||
|
|
||||||
bkt := client.Bucket(bucketName)
|
|
||||||
|
|
||||||
A handle is a reference to a bucket. You can have a handle even if the
|
|
||||||
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
|
|
||||||
call [BucketHandle.Create]:
|
|
||||||
|
|
||||||
if err := bkt.Create(ctx, projectID, nil); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
Note that although buckets are associated with projects, bucket names are
|
|
||||||
global across all projects.
|
|
||||||
|
|
||||||
Each bucket has associated metadata, represented in this package by
|
|
||||||
[BucketAttrs]. The third argument to [BucketHandle.Create] allows you to set
|
|
||||||
the initial [BucketAttrs] of a bucket. To retrieve a bucket's attributes, use
|
|
||||||
[BucketHandle.Attrs]:
|
|
||||||
|
|
||||||
attrs, err := bkt.Attrs(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
|
|
||||||
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
|
|
||||||
|
|
||||||
# Objects
|
|
||||||
|
|
||||||
An object holds arbitrary data as a sequence of bytes, like a file. You
|
|
||||||
refer to objects using a handle, just as with buckets, but unlike buckets
|
|
||||||
you don't explicitly create an object. Instead, the first time you write
|
|
||||||
to an object it will be created. You can use the standard Go [io.Reader]
|
|
||||||
and [io.Writer] interfaces to read and write object data:
|
|
||||||
|
|
||||||
obj := bkt.Object("data")
|
|
||||||
// Write something to obj.
|
|
||||||
// w implements io.Writer.
|
|
||||||
w := obj.NewWriter(ctx)
|
|
||||||
// Write some text to obj. This will either create the object or overwrite whatever is there already.
|
|
||||||
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
// Close, just like writing a file.
|
|
||||||
if err := w.Close(); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read it back.
|
|
||||||
r, err := obj.NewReader(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
defer r.Close()
|
|
||||||
if _, err := io.Copy(os.Stdout, r); err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
// Prints "This object contains text."
|
|
||||||
|
|
||||||
Objects also have attributes, which you can fetch with [ObjectHandle.Attrs]:
|
|
||||||
|
|
||||||
objAttrs, err := obj.Attrs(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Printf("object %s has size %d and can be read using %s\n",
|
|
||||||
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
|
|
||||||
|
|
||||||
# Listing objects
|
|
||||||
|
|
||||||
Listing objects in a bucket is done with the [BucketHandle.Objects] method:
|
|
||||||
|
|
||||||
query := &storage.Query{Prefix: ""}
|
|
||||||
|
|
||||||
var names []string
|
|
||||||
it := bkt.Objects(ctx, query)
|
|
||||||
for {
|
|
||||||
attrs, err := it.Next()
|
|
||||||
if err == iterator.Done {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
names = append(names, attrs.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
Objects are listed lexicographically by name. To filter objects
|
|
||||||
lexicographically, [Query.StartOffset] and/or [Query.EndOffset] can be used:
|
|
||||||
|
|
||||||
query := &storage.Query{
|
|
||||||
Prefix: "",
|
|
||||||
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
|
|
||||||
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
|
|
||||||
}
|
|
||||||
|
|
||||||
// ... as before
|
|
||||||
|
|
||||||
If only a subset of object attributes is needed when listing, specifying this
|
|
||||||
subset using [Query.SetAttrSelection] may speed up the listing process:
|
|
||||||
|
|
||||||
query := &storage.Query{Prefix: ""}
|
|
||||||
query.SetAttrSelection([]string{"Name"})
|
|
||||||
|
|
||||||
// ... as before
|
|
||||||
|
|
||||||
# ACLs
|
|
||||||
|
|
||||||
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
|
|
||||||
ACLRules, each of which specifies the role of a user, group or project. ACLs
|
|
||||||
are suitable for fine-grained control, but you may prefer using IAM to control
|
|
||||||
access at the project level (see [Cloud Storage IAM docs].
|
|
||||||
|
|
||||||
To list the ACLs of a bucket or object, obtain an [ACLHandle] and call [ACLHandle.List]:
|
|
||||||
|
|
||||||
acls, err := obj.ACL().List(ctx)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
for _, rule := range acls {
|
|
||||||
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
|
|
||||||
}
|
|
||||||
|
|
||||||
You can also set and delete ACLs.
|
|
||||||
|
|
||||||
# Conditions
|
|
||||||
|
|
||||||
Every object has a generation and a metageneration. The generation changes
|
|
||||||
whenever the content changes, and the metageneration changes whenever the
|
|
||||||
metadata changes. [Conditions] let you check these values before an operation;
|
|
||||||
the operation only executes if the conditions match. You can use conditions to
|
|
||||||
prevent race conditions in read-modify-write operations.
|
|
||||||
|
|
||||||
For example, say you've read an object's metadata into objAttrs. Now
|
|
||||||
you want to write to that object, but only if its contents haven't changed
|
|
||||||
since you read it. Here is how to express that:
|
|
||||||
|
|
||||||
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
|
|
||||||
// Proceed with writing as above.
|
|
||||||
|
|
||||||
# Signed URLs
|
|
||||||
|
|
||||||
You can obtain a URL that lets anyone read or write an object for a limited time.
|
|
||||||
Signing a URL requires credentials authorized to sign a URL. To use the same
|
|
||||||
authentication that was used when instantiating the Storage client, use
|
|
||||||
[BucketHandle.SignedURL].
|
|
||||||
|
|
||||||
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Println(url)
|
|
||||||
|
|
||||||
You can also sign a URL without creating a client. See the documentation of
|
|
||||||
[SignedURL] for details.
|
|
||||||
|
|
||||||
url, err := storage.SignedURL(bucketName, "shared-object", opts)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Println(url)
|
|
||||||
|
|
||||||
# Post Policy V4 Signed Request
|
|
||||||
|
|
||||||
A type of signed request that allows uploads through HTML forms directly to Cloud Storage with
|
|
||||||
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
|
|
||||||
by a user.
|
|
||||||
|
|
||||||
For more information, please see the [XML POST Object docs] as well
|
|
||||||
as the documentation of [BucketHandle.GenerateSignedPostPolicyV4].
|
|
||||||
|
|
||||||
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
|
|
||||||
if err != nil {
|
|
||||||
// TODO: Handle error.
|
|
||||||
}
|
|
||||||
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
|
|
||||||
|
|
||||||
# Credential requirements for signing
|
|
||||||
|
|
||||||
If the GoogleAccessID and PrivateKey option fields are not provided, they will
|
|
||||||
be automatically detected by [BucketHandle.SignedURL] and
|
|
||||||
[BucketHandle.GenerateSignedPostPolicyV4] if any of the following are true:
|
|
||||||
- you are authenticated to the Storage Client with a service account's
|
|
||||||
downloaded private key, either directly in code or by setting the
|
|
||||||
GOOGLE_APPLICATION_CREDENTIALS environment variable (see [Other Environments]),
|
|
||||||
- your application is running on Google Compute Engine (GCE), or
|
|
||||||
- you are logged into [gcloud using application default credentials]
|
|
||||||
with [impersonation enabled].
|
|
||||||
|
|
||||||
Detecting GoogleAccessID may not be possible if you are authenticated using a
|
|
||||||
token source or using [option.WithHTTPClient]. In this case, you can provide a
|
|
||||||
service account email for GoogleAccessID and the client will attempt to sign
|
|
||||||
the URL or Post Policy using that service account.
|
|
||||||
|
|
||||||
To generate the signature, you must have:
|
|
||||||
- iam.serviceAccounts.signBlob permissions on the GoogleAccessID service
|
|
||||||
account, and
|
|
||||||
- the [IAM Service Account Credentials API] enabled (unless authenticating
|
|
||||||
with a downloaded private key).
|
|
||||||
|
|
||||||
# Errors
|
|
||||||
|
|
||||||
Errors returned by this client are often of the type [googleapi.Error].
|
|
||||||
These errors can be introspected for more information by using [errors.As]
|
|
||||||
with the richer [googleapi.Error] type. For example:
|
|
||||||
|
|
||||||
var e *googleapi.Error
|
|
||||||
if ok := errors.As(err, &e); ok {
|
|
||||||
if e.Code == 409 { ... }
|
|
||||||
}
|
|
||||||
|
|
||||||
# Retrying failed requests
|
|
||||||
|
|
||||||
Methods in this package may retry calls that fail with transient errors.
|
|
||||||
Retrying continues indefinitely unless the controlling context is canceled, the
|
|
||||||
client is closed, or a non-transient error is received. To stop retries from
|
|
||||||
continuing, use context timeouts or cancellation.
|
|
||||||
|
|
||||||
The retry strategy in this library follows best practices for Cloud Storage. By
|
|
||||||
default, operations are retried only if they are idempotent, and exponential
|
|
||||||
backoff with jitter is employed. In addition, errors are only retried if they
|
|
||||||
are defined as transient by the service. See the [Cloud Storage retry docs]
|
|
||||||
for more information.
|
|
||||||
|
|
||||||
Users can configure non-default retry behavior for a single library call (using
|
|
||||||
[BucketHandle.Retryer] and [ObjectHandle.Retryer]) or for all calls made by a
|
|
||||||
client (using [Client.SetRetry]). For example:
|
|
||||||
|
|
||||||
o := client.Bucket(bucket).Object(object).Retryer(
|
|
||||||
// Use WithBackoff to change the timing of the exponential backoff.
|
|
||||||
storage.WithBackoff(gax.Backoff{
|
|
||||||
Initial: 2 * time.Second,
|
|
||||||
}),
|
|
||||||
// Use WithPolicy to configure the idempotency policy. RetryAlways will
|
|
||||||
// retry the operation even if it is non-idempotent.
|
|
||||||
storage.WithPolicy(storage.RetryAlways),
|
|
||||||
)
|
|
||||||
|
|
||||||
// Use a context timeout to set an overall deadline on the call, including all
|
|
||||||
// potential retries.
|
|
||||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
// Delete an object using the specified strategy and timeout.
|
|
||||||
if err := o.Delete(ctx); err != nil {
|
|
||||||
// Handle err.
|
|
||||||
}
|
|
||||||
|
|
||||||
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
|
|
||||||
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
|
|
||||||
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
|
|
||||||
[Other Environments]: https://cloud.google.com/storage/docs/authentication#libauth
|
|
||||||
[gcloud using application default credentials]: https://cloud.google.com/sdk/gcloud/reference/auth/application-default/login
|
|
||||||
[impersonation enabled]: https://cloud.google.com/sdk/gcloud/reference#--impersonate-service-account
|
|
||||||
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
|
|
||||||
*/
|
|
||||||
package storage // import "cloud.google.com/go/storage"
|
|
||||||
92
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
92
vendor/cloud.google.com/go/storage/emulator_test.sh
generated
vendored
|
|
@ -1,92 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
# Copyright 2021 Google LLC
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License..
|
|
||||||
|
|
||||||
# Fail on any error
|
|
||||||
set -eo pipefail
|
|
||||||
|
|
||||||
# Display commands being run
|
|
||||||
set -x
|
|
||||||
|
|
||||||
# Only run on Go 1.17+
|
|
||||||
min_minor_ver=17
|
|
||||||
|
|
||||||
v=`go version | { read _ _ v _; echo ${v#go}; }`
|
|
||||||
comps=(${v//./ })
|
|
||||||
minor_ver=${comps[1]}
|
|
||||||
|
|
||||||
if [ "$minor_ver" -lt "$min_minor_ver" ]; then
|
|
||||||
echo minor version $minor_ver, skipping
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
export STORAGE_EMULATOR_HOST="http://localhost:9000"
|
|
||||||
export STORAGE_EMULATOR_HOST_GRPC="localhost:8888"
|
|
||||||
|
|
||||||
DEFAULT_IMAGE_NAME='gcr.io/cloud-devrel-public-resources/storage-testbench'
|
|
||||||
DEFAULT_IMAGE_TAG='latest'
|
|
||||||
DOCKER_IMAGE=${DEFAULT_IMAGE_NAME}:${DEFAULT_IMAGE_TAG}
|
|
||||||
CONTAINER_NAME=storage_testbench
|
|
||||||
|
|
||||||
# Note: --net=host makes the container bind directly to the Docker host’s network,
|
|
||||||
# with no network isolation. If we were to use port-mapping instead, reset connection errors
|
|
||||||
# would be captured differently and cause unexpected test behaviour.
|
|
||||||
# The host networking driver works only on Linux hosts.
|
|
||||||
# See more about using host networking: https://docs.docker.com/network/host/
|
|
||||||
DOCKER_NETWORK="--net=host"
|
|
||||||
# Note: We do not expect the RetryConformanceTest suite to pass on darwin due to
|
|
||||||
# differences in the network errors emitted by the system.
|
|
||||||
if [ `go env GOOS` == 'darwin' ]; then
|
|
||||||
DOCKER_NETWORK="-p 9000:9000 -p 8888:8888"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Get the docker image for the testbench
|
|
||||||
docker pull $DOCKER_IMAGE
|
|
||||||
|
|
||||||
# Start the testbench
|
|
||||||
|
|
||||||
docker run --name $CONTAINER_NAME --rm -d $DOCKER_NETWORK $DOCKER_IMAGE
|
|
||||||
echo "Running the Cloud Storage testbench: $STORAGE_EMULATOR_HOST"
|
|
||||||
sleep 1
|
|
||||||
|
|
||||||
# Stop the testbench & cleanup environment variables
|
|
||||||
function cleanup() {
|
|
||||||
echo "Cleanup testbench"
|
|
||||||
docker stop $CONTAINER_NAME
|
|
||||||
unset STORAGE_EMULATOR_HOST;
|
|
||||||
unset STORAGE_EMULATOR_HOST_GRPC;
|
|
||||||
}
|
|
||||||
trap cleanup EXIT
|
|
||||||
|
|
||||||
# Check that the server is running - retry several times to allow for start-up time
|
|
||||||
response=$(curl -w "%{http_code}\n" $STORAGE_EMULATOR_HOST --retry-connrefused --retry 5 -o /dev/null)
|
|
||||||
|
|
||||||
if [[ $response != 200 ]]
|
|
||||||
then
|
|
||||||
echo "Testbench server did not start correctly"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Start the gRPC server on port 8888.
|
|
||||||
echo "Starting the gRPC server on port 8888"
|
|
||||||
response=$(curl -w "%{http_code}\n" --retry 5 --retry-max-time 40 -o /dev/null "$STORAGE_EMULATOR_HOST/start_grpc?port=8888")
|
|
||||||
|
|
||||||
if [[ $response != 200 ]]
|
|
||||||
then
|
|
||||||
echo "Testbench gRPC server did not start correctly"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Run tests
|
|
||||||
go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log
|
|
||||||
1699
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
1699
vendor/cloud.google.com/go/storage/grpc_client.go
generated
vendored
File diff suppressed because it is too large
Load diff
392
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
392
vendor/cloud.google.com/go/storage/hmac.go
generated
vendored
|
|
@ -1,392 +0,0 @@
|
||||||
// Copyright 2019 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
|
||||||
"google.golang.org/api/iterator"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HMACState is the state of the HMAC key.
|
|
||||||
//
|
|
||||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
type HMACState string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Active is the status for an active key that can be used to sign
|
|
||||||
// requests.
|
|
||||||
Active HMACState = "ACTIVE"
|
|
||||||
|
|
||||||
// Inactive is the status for an inactive key thus requests signed by
|
|
||||||
// this key will be denied.
|
|
||||||
Inactive HMACState = "INACTIVE"
|
|
||||||
|
|
||||||
// Deleted is the status for a key that is deleted.
|
|
||||||
// Once in this state the key cannot key cannot be recovered
|
|
||||||
// and does not count towards key limits. Deleted keys will be cleaned
|
|
||||||
// up later.
|
|
||||||
Deleted HMACState = "DELETED"
|
|
||||||
)
|
|
||||||
|
|
||||||
// HMACKey is the representation of a Google Cloud Storage HMAC key.
|
|
||||||
//
|
|
||||||
// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
|
|
||||||
// authentication, please visit https://cloud.google.com/storage/docs/migrating.
|
|
||||||
//
|
|
||||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
type HMACKey struct {
|
|
||||||
// The HMAC's secret key.
|
|
||||||
Secret string
|
|
||||||
|
|
||||||
// AccessID is the ID of the HMAC key.
|
|
||||||
AccessID string
|
|
||||||
|
|
||||||
// Etag is the HTTP/1.1 Entity tag.
|
|
||||||
Etag string
|
|
||||||
|
|
||||||
// ID is the ID of the HMAC key, including the ProjectID and AccessID.
|
|
||||||
ID string
|
|
||||||
|
|
||||||
// ProjectID is the ID of the project that owns the
|
|
||||||
// service account to which the key authenticates.
|
|
||||||
ProjectID string
|
|
||||||
|
|
||||||
// ServiceAccountEmail is the email address
|
|
||||||
// of the key's associated service account.
|
|
||||||
ServiceAccountEmail string
|
|
||||||
|
|
||||||
// CreatedTime is the creation time of the HMAC key.
|
|
||||||
CreatedTime time.Time
|
|
||||||
|
|
||||||
// UpdatedTime is the last modification time of the HMAC key metadata.
|
|
||||||
UpdatedTime time.Time
|
|
||||||
|
|
||||||
// State is the state of the HMAC key.
|
|
||||||
// It can be one of StateActive, StateInactive or StateDeleted.
|
|
||||||
State HMACState
|
|
||||||
}
|
|
||||||
|
|
||||||
// HMACKeyHandle helps provide access and management for HMAC keys.
|
|
||||||
//
|
|
||||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
type HMACKeyHandle struct {
|
|
||||||
projectID string
|
|
||||||
accessID string
|
|
||||||
retry *retryConfig
|
|
||||||
tc storageClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
|
|
||||||
return &HMACKeyHandle{
|
|
||||||
projectID: projectID,
|
|
||||||
accessID: accessID,
|
|
||||||
retry: c.retry,
|
|
||||||
tc: c.tc,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get invokes an RPC to retrieve the HMAC key referenced by the
|
|
||||||
// HMACKeyHandle's accessID.
|
|
||||||
//
|
|
||||||
// Options such as UserProjectForHMACKeys can be used to set the
|
|
||||||
// userProject to be billed against for operations.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
|
|
||||||
desc := new(hmacKeyDesc)
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt.withHMACKeyDesc(desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
|
||||||
hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
|
||||||
|
|
||||||
return hk, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
|
|
||||||
// Only inactive HMAC keys can be deleted.
|
|
||||||
// After deletion, a key cannot be used to authenticate requests.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
|
|
||||||
desc := new(hmacKeyDesc)
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt.withHMACKeyDesc(desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
|
|
||||||
return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
|
|
||||||
hkmd := hk.Metadata
|
|
||||||
if hkmd == nil {
|
|
||||||
return nil, errors.New("field Metadata cannot be nil")
|
|
||||||
}
|
|
||||||
createdTime, err := time.Parse(time.RFC3339, hkmd.TimeCreated)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("field CreatedTime: %v", err)
|
|
||||||
}
|
|
||||||
updatedTime, err := time.Parse(time.RFC3339, hkmd.Updated)
|
|
||||||
if err != nil && !updatedTimeCanBeNil {
|
|
||||||
return nil, fmt.Errorf("field UpdatedTime: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
hmKey := &HMACKey{
|
|
||||||
AccessID: hkmd.AccessId,
|
|
||||||
Secret: hk.Secret,
|
|
||||||
Etag: hkmd.Etag,
|
|
||||||
ID: hkmd.Id,
|
|
||||||
State: HMACState(hkmd.State),
|
|
||||||
ProjectID: hkmd.ProjectId,
|
|
||||||
CreatedTime: createdTime,
|
|
||||||
UpdatedTime: updatedTime,
|
|
||||||
|
|
||||||
ServiceAccountEmail: hkmd.ServiceAccountEmail,
|
|
||||||
}
|
|
||||||
|
|
||||||
return hmKey, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
|
|
||||||
if pbmd == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &HMACKey{
|
|
||||||
AccessID: pbmd.GetAccessId(),
|
|
||||||
ID: pbmd.GetId(),
|
|
||||||
State: HMACState(pbmd.GetState()),
|
|
||||||
ProjectID: pbmd.GetProject(),
|
|
||||||
CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
|
|
||||||
UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
|
|
||||||
ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
|
|
||||||
if projectID == "" {
|
|
||||||
return nil, errors.New("storage: expecting a non-blank projectID")
|
|
||||||
}
|
|
||||||
if serviceAccountEmail == "" {
|
|
||||||
return nil, errors.New("storage: expecting a non-blank service account email")
|
|
||||||
}
|
|
||||||
|
|
||||||
desc := new(hmacKeyDesc)
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt.withHMACKeyDesc(desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
o := makeStorageOpts(false, c.retry, desc.userProjectID)
|
|
||||||
hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...)
|
|
||||||
return hk, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
|
|
||||||
//
|
|
||||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
type HMACKeyAttrsToUpdate struct {
|
|
||||||
// State is required and must be either StateActive or StateInactive.
|
|
||||||
State HMACState
|
|
||||||
|
|
||||||
// Etag is an optional field and it is the HTTP/1.1 Entity tag.
|
|
||||||
Etag string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update mutates the HMACKey referred to by accessID.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
|
|
||||||
if au.State != Active && au.State != Inactive {
|
|
||||||
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
|
|
||||||
}
|
|
||||||
|
|
||||||
desc := new(hmacKeyDesc)
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt.withHMACKeyDesc(desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
isIdempotent := len(au.Etag) > 0
|
|
||||||
o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID)
|
|
||||||
hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...)
|
|
||||||
return hk, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// An HMACKeysIterator is an iterator over HMACKeys.
|
|
||||||
//
|
|
||||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
|
||||||
//
|
|
||||||
// This type is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
type HMACKeysIterator struct {
|
|
||||||
ctx context.Context
|
|
||||||
raw *raw.ProjectsHmacKeysService
|
|
||||||
projectID string
|
|
||||||
hmacKeys []*HMACKey
|
|
||||||
pageInfo *iterator.PageInfo
|
|
||||||
nextFunc func() error
|
|
||||||
index int
|
|
||||||
desc hmacKeyDesc
|
|
||||||
retry *retryConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListHMACKeys returns an iterator for listing HMACKeys.
|
|
||||||
//
|
|
||||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
|
|
||||||
desc := new(hmacKeyDesc)
|
|
||||||
for _, opt := range opts {
|
|
||||||
opt.withHMACKeyDesc(desc)
|
|
||||||
}
|
|
||||||
|
|
||||||
o := makeStorageOpts(true, c.retry, desc.userProjectID)
|
|
||||||
return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Next returns the next result. Its second return value is iterator.Done if
|
|
||||||
// there are no more results. Once Next returns iterator.Done, all subsequent
|
|
||||||
// calls will return iterator.Done.
|
|
||||||
//
|
|
||||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (it *HMACKeysIterator) Next() (*HMACKey, error) {
|
|
||||||
if err := it.nextFunc(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
key := it.hmacKeys[it.index]
|
|
||||||
it.index++
|
|
||||||
|
|
||||||
return key, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
|
||||||
//
|
|
||||||
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
|
|
||||||
//
|
|
||||||
// This method is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
|
||||||
|
|
||||||
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
|
|
||||||
// TODO: Remove fetch method upon integration. This method is internalized into
|
|
||||||
// httpStorageClient.ListHMACKeys() as it is the only caller.
|
|
||||||
call := it.raw.List(it.projectID)
|
|
||||||
setClientHeader(call.Header())
|
|
||||||
if pageToken != "" {
|
|
||||||
call = call.PageToken(pageToken)
|
|
||||||
}
|
|
||||||
if it.desc.showDeletedKeys {
|
|
||||||
call = call.ShowDeletedKeys(true)
|
|
||||||
}
|
|
||||||
if it.desc.userProjectID != "" {
|
|
||||||
call = call.UserProject(it.desc.userProjectID)
|
|
||||||
}
|
|
||||||
if it.desc.forServiceAccountEmail != "" {
|
|
||||||
call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail)
|
|
||||||
}
|
|
||||||
if pageSize > 0 {
|
|
||||||
call = call.MaxResults(int64(pageSize))
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := it.ctx
|
|
||||||
var resp *raw.HmacKeysMetadata
|
|
||||||
err = run(it.ctx, func() error {
|
|
||||||
resp, err = call.Context(ctx).Do()
|
|
||||||
return err
|
|
||||||
}, it.retry, true, setRetryHeaderHTTP(call))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, metadata := range resp.Items {
|
|
||||||
hk := &raw.HmacKey{
|
|
||||||
Metadata: metadata,
|
|
||||||
}
|
|
||||||
hkey, err := toHMACKeyFromRaw(hk, true)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
it.hmacKeys = append(it.hmacKeys, hkey)
|
|
||||||
}
|
|
||||||
return resp.NextPageToken, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type hmacKeyDesc struct {
|
|
||||||
forServiceAccountEmail string
|
|
||||||
showDeletedKeys bool
|
|
||||||
userProjectID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// HMACKeyOption configures the behavior of HMACKey related methods and actions.
|
|
||||||
//
|
|
||||||
// This interface is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
type HMACKeyOption interface {
|
|
||||||
withHMACKeyDesc(*hmacKeyDesc)
|
|
||||||
}
|
|
||||||
|
|
||||||
type hmacKeyDescFunc func(*hmacKeyDesc)
|
|
||||||
|
|
||||||
func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) {
|
|
||||||
hkdf(hkd)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForHMACKeyServiceAccountEmail returns HMAC Keys that are
|
|
||||||
// associated with the email address of a service account in the project.
|
|
||||||
//
|
|
||||||
// Only one service account email can be used as a filter, so if multiple
|
|
||||||
// of these options are applied, the last email to be set will be used.
|
|
||||||
//
|
|
||||||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption {
|
|
||||||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
|
|
||||||
hkd.forServiceAccountEmail = serviceAccountEmail
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShowDeletedHMACKeys will also list keys whose state is "DELETED".
|
|
||||||
//
|
|
||||||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func ShowDeletedHMACKeys() HMACKeyOption {
|
|
||||||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
|
|
||||||
hkd.showDeletedKeys = true
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// UserProjectForHMACKeys will bill the request against userProjectID
|
|
||||||
// if userProjectID is non-empty.
|
|
||||||
//
|
|
||||||
// Note: This is a noop right now and only provided for API compatibility.
|
|
||||||
//
|
|
||||||
// This option is EXPERIMENTAL and subject to change or removal without notice.
|
|
||||||
func UserProjectForHMACKeys(userProjectID string) HMACKeyOption {
|
|
||||||
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
|
|
||||||
hkd.userProjectID = userProjectID
|
|
||||||
})
|
|
||||||
}
|
|
||||||
1347
vendor/cloud.google.com/go/storage/http_client.go
generated
vendored
1347
vendor/cloud.google.com/go/storage/http_client.go
generated
vendored
File diff suppressed because it is too large
Load diff
133
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
133
vendor/cloud.google.com/go/storage/iam.go
generated
vendored
|
|
@ -1,133 +0,0 @@
|
||||||
// Copyright 2017 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"cloud.google.com/go/iam"
|
|
||||||
"cloud.google.com/go/internal/trace"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
iampb "google.golang.org/genproto/googleapis/iam/v1"
|
|
||||||
"google.golang.org/genproto/googleapis/type/expr"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IAM provides access to IAM access control for the bucket.
|
|
||||||
func (b *BucketHandle) IAM() *iam.Handle {
|
|
||||||
return iam.InternalNewHandleClient(&iamClient{
|
|
||||||
userProject: b.userProject,
|
|
||||||
retry: b.retry,
|
|
||||||
client: b.c,
|
|
||||||
}, b.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// iamClient implements the iam.client interface.
|
|
||||||
type iamClient struct {
|
|
||||||
userProject string
|
|
||||||
retry *retryConfig
|
|
||||||
client *Client
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
|
|
||||||
return c.GetWithVersion(ctx, resource, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
|
||||||
return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
isIdempotent := len(p.Etag) > 0
|
|
||||||
o := makeStorageOpts(isIdempotent, c.retry, c.userProject)
|
|
||||||
return c.client.tc.SetIamPolicy(ctx, resource, p, o...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
o := makeStorageOpts(true, c.retry, c.userProject)
|
|
||||||
return c.client.tc.TestIamPermissions(ctx, resource, perms, o...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
|
|
||||||
return &raw.Policy{
|
|
||||||
Bindings: iamToStorageBindings(ip.Bindings),
|
|
||||||
Etag: string(ip.Etag),
|
|
||||||
Version: int64(ip.Version),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
|
|
||||||
var rbs []*raw.PolicyBindings
|
|
||||||
for _, ib := range ibs {
|
|
||||||
rbs = append(rbs, &raw.PolicyBindings{
|
|
||||||
Role: ib.Role,
|
|
||||||
Members: ib.Members,
|
|
||||||
Condition: iamToStorageCondition(ib.Condition),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return rbs
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr {
|
|
||||||
if exprpb == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &raw.Expr{
|
|
||||||
Expression: exprpb.Expression,
|
|
||||||
Description: exprpb.Description,
|
|
||||||
Location: exprpb.Location,
|
|
||||||
Title: exprpb.Title,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
|
|
||||||
return &iampb.Policy{
|
|
||||||
Bindings: iamFromStorageBindings(rp.Bindings),
|
|
||||||
Etag: []byte(rp.Etag),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
|
|
||||||
var ibs []*iampb.Binding
|
|
||||||
for _, rb := range rbs {
|
|
||||||
ibs = append(ibs, &iampb.Binding{
|
|
||||||
Role: rb.Role,
|
|
||||||
Members: rb.Members,
|
|
||||||
Condition: iamFromStorageCondition(rb.Condition),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return ibs
|
|
||||||
}
|
|
||||||
|
|
||||||
func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr {
|
|
||||||
if rawexpr == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &expr.Expr{
|
|
||||||
Expression: rawexpr.Expression,
|
|
||||||
Description: rawexpr.Description,
|
|
||||||
Location: rawexpr.Location,
|
|
||||||
Title: rawexpr.Title,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
174
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
generated
vendored
174
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
generated
vendored
|
|
@ -1,174 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// https://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
|
|
||||||
|
|
||||||
// Package storage is an auto-generated package for the
|
|
||||||
// Cloud Storage API.
|
|
||||||
//
|
|
||||||
// Lets you store and retrieve potentially-large, immutable data objects.
|
|
||||||
//
|
|
||||||
// NOTE: This package is in alpha. It is not stable, and is likely to change.
|
|
||||||
//
|
|
||||||
// # Example usage
|
|
||||||
//
|
|
||||||
// To get started with this package, create a client.
|
|
||||||
//
|
|
||||||
// ctx := context.Background()
|
|
||||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
|
||||||
// // It will require modifications to work:
|
|
||||||
// // - It may require correct/in-range values for request initialization.
|
|
||||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
|
||||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
|
||||||
// c, err := storage.NewClient(ctx)
|
|
||||||
// if err != nil {
|
|
||||||
// // TODO: Handle error.
|
|
||||||
// }
|
|
||||||
// defer c.Close()
|
|
||||||
//
|
|
||||||
// The client will use your default application credentials. Clients should be reused instead of created as needed.
|
|
||||||
// The methods of Client are safe for concurrent use by multiple goroutines.
|
|
||||||
// The returned client must be Closed when it is done being used.
|
|
||||||
//
|
|
||||||
// # Using the Client
|
|
||||||
//
|
|
||||||
// The following is an example of making an API call with the newly created client.
|
|
||||||
//
|
|
||||||
// ctx := context.Background()
|
|
||||||
// // This snippet has been automatically generated and should be regarded as a code template only.
|
|
||||||
// // It will require modifications to work:
|
|
||||||
// // - It may require correct/in-range values for request initialization.
|
|
||||||
// // - It may require specifying regional endpoints when creating the service client as shown in:
|
|
||||||
// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
|
|
||||||
// c, err := storage.NewClient(ctx)
|
|
||||||
// if err != nil {
|
|
||||||
// // TODO: Handle error.
|
|
||||||
// }
|
|
||||||
// defer c.Close()
|
|
||||||
//
|
|
||||||
// req := &storagepb.DeleteBucketRequest{
|
|
||||||
// // TODO: Fill request struct fields.
|
|
||||||
// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest.
|
|
||||||
// }
|
|
||||||
// err = c.DeleteBucket(ctx, req)
|
|
||||||
// if err != nil {
|
|
||||||
// // TODO: Handle error.
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// # Use of Context
|
|
||||||
//
|
|
||||||
// The ctx passed to NewClient is used for authentication requests and
|
|
||||||
// for creating the underlying connection, but is not used for subsequent calls.
|
|
||||||
// Individual methods on the client use the ctx given to them.
|
|
||||||
//
|
|
||||||
// To close the open connection, use the Close() method.
|
|
||||||
//
|
|
||||||
// For information about setting deadlines, reusing contexts, and more
|
|
||||||
// please visit https://pkg.go.dev/cloud.google.com/go.
|
|
||||||
package storage // import "cloud.google.com/go/storage/internal/apiv2"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"google.golang.org/api/option"
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
// For more information on implementing a client constructor hook, see
|
|
||||||
// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
|
|
||||||
type clientHookParams struct{}
|
|
||||||
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
|
|
||||||
|
|
||||||
var versionClient string
|
|
||||||
|
|
||||||
func getVersionClient() string {
|
|
||||||
if versionClient == "" {
|
|
||||||
return "UNKNOWN"
|
|
||||||
}
|
|
||||||
return versionClient
|
|
||||||
}
|
|
||||||
|
|
||||||
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
|
||||||
out, _ := metadata.FromOutgoingContext(ctx)
|
|
||||||
out = out.Copy()
|
|
||||||
for _, md := range mds {
|
|
||||||
for k, v := range md {
|
|
||||||
out[k] = append(out[k], v...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return metadata.NewOutgoingContext(ctx, out)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkDisableDeadlines() (bool, error) {
|
|
||||||
raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE")
|
|
||||||
if !ok {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
b, err := strconv.ParseBool(raw)
|
|
||||||
return b, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
|
|
||||||
func DefaultAuthScopes() []string {
|
|
||||||
return []string{
|
|
||||||
"https://www.googleapis.com/auth/cloud-platform",
|
|
||||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
|
||||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
|
||||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
|
||||||
"https://www.googleapis.com/auth/devstorage.read_write",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// versionGo returns the Go runtime version. The returned string
|
|
||||||
// has no whitespace, suitable for reporting in header.
|
|
||||||
func versionGo() string {
|
|
||||||
const develPrefix = "devel +"
|
|
||||||
|
|
||||||
s := runtime.Version()
|
|
||||||
if strings.HasPrefix(s, develPrefix) {
|
|
||||||
s = s[len(develPrefix):]
|
|
||||||
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
|
|
||||||
s = s[:p]
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
notSemverRune := func(r rune) bool {
|
|
||||||
return !strings.ContainsRune("0123456789.", r)
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasPrefix(s, "go1") {
|
|
||||||
s = s[2:]
|
|
||||||
var prerelease string
|
|
||||||
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
|
|
||||||
s, prerelease = s[:p], s[p:]
|
|
||||||
}
|
|
||||||
if strings.HasSuffix(s, ".") {
|
|
||||||
s += "0"
|
|
||||||
} else if strings.Count(s, ".") < 2 {
|
|
||||||
s += ".0"
|
|
||||||
}
|
|
||||||
if prerelease != "" {
|
|
||||||
s += "-" + prerelease
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return "UNKNOWN"
|
|
||||||
}
|
|
||||||
168
vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
generated
vendored
168
vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
generated
vendored
|
|
@ -1,168 +0,0 @@
|
||||||
{
|
|
||||||
"schema": "1.0",
|
|
||||||
"comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
|
|
||||||
"language": "go",
|
|
||||||
"protoPackage": "google.storage.v2",
|
|
||||||
"libraryPackage": "cloud.google.com/go/storage/internal/apiv2",
|
|
||||||
"services": {
|
|
||||||
"Storage": {
|
|
||||||
"clients": {
|
|
||||||
"grpc": {
|
|
||||||
"libraryClient": "Client",
|
|
||||||
"rpcs": {
|
|
||||||
"CancelResumableWrite": {
|
|
||||||
"methods": [
|
|
||||||
"CancelResumableWrite"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"ComposeObject": {
|
|
||||||
"methods": [
|
|
||||||
"ComposeObject"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"CreateBucket": {
|
|
||||||
"methods": [
|
|
||||||
"CreateBucket"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"CreateHmacKey": {
|
|
||||||
"methods": [
|
|
||||||
"CreateHmacKey"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"CreateNotification": {
|
|
||||||
"methods": [
|
|
||||||
"CreateNotification"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"DeleteBucket": {
|
|
||||||
"methods": [
|
|
||||||
"DeleteBucket"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"DeleteHmacKey": {
|
|
||||||
"methods": [
|
|
||||||
"DeleteHmacKey"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"DeleteNotification": {
|
|
||||||
"methods": [
|
|
||||||
"DeleteNotification"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"DeleteObject": {
|
|
||||||
"methods": [
|
|
||||||
"DeleteObject"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"GetBucket": {
|
|
||||||
"methods": [
|
|
||||||
"GetBucket"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"GetHmacKey": {
|
|
||||||
"methods": [
|
|
||||||
"GetHmacKey"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"GetIamPolicy": {
|
|
||||||
"methods": [
|
|
||||||
"GetIamPolicy"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"GetNotification": {
|
|
||||||
"methods": [
|
|
||||||
"GetNotification"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"GetObject": {
|
|
||||||
"methods": [
|
|
||||||
"GetObject"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"GetServiceAccount": {
|
|
||||||
"methods": [
|
|
||||||
"GetServiceAccount"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"ListBuckets": {
|
|
||||||
"methods": [
|
|
||||||
"ListBuckets"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"ListHmacKeys": {
|
|
||||||
"methods": [
|
|
||||||
"ListHmacKeys"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"ListNotifications": {
|
|
||||||
"methods": [
|
|
||||||
"ListNotifications"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"ListObjects": {
|
|
||||||
"methods": [
|
|
||||||
"ListObjects"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"LockBucketRetentionPolicy": {
|
|
||||||
"methods": [
|
|
||||||
"LockBucketRetentionPolicy"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"QueryWriteStatus": {
|
|
||||||
"methods": [
|
|
||||||
"QueryWriteStatus"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"ReadObject": {
|
|
||||||
"methods": [
|
|
||||||
"ReadObject"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"RewriteObject": {
|
|
||||||
"methods": [
|
|
||||||
"RewriteObject"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"SetIamPolicy": {
|
|
||||||
"methods": [
|
|
||||||
"SetIamPolicy"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"StartResumableWrite": {
|
|
||||||
"methods": [
|
|
||||||
"StartResumableWrite"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"TestIamPermissions": {
|
|
||||||
"methods": [
|
|
||||||
"TestIamPermissions"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"UpdateBucket": {
|
|
||||||
"methods": [
|
|
||||||
"UpdateBucket"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"UpdateHmacKey": {
|
|
||||||
"methods": [
|
|
||||||
"UpdateHmacKey"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"UpdateObject": {
|
|
||||||
"methods": [
|
|
||||||
"UpdateObject"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"WriteObject": {
|
|
||||||
"methods": [
|
|
||||||
"WriteObject"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
26
vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go
generated
vendored
26
vendor/cloud.google.com/go/storage/internal/apiv2/metadata.go
generated
vendored
|
|
@ -1,26 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// https://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/metadata"
|
|
||||||
)
|
|
||||||
|
|
||||||
// InsertMetadata inserts the given gRPC metadata into the outgoing context.
|
|
||||||
func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
|
|
||||||
return insertMetadata(ctx, mds...)
|
|
||||||
}
|
|
||||||
1517
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
generated
vendored
1517
vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
generated
vendored
File diff suppressed because it is too large
Load diff
10606
vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go
generated
vendored
10606
vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go
generated
vendored
File diff suppressed because it is too large
Load diff
23
vendor/cloud.google.com/go/storage/internal/apiv2/version.go
generated
vendored
23
vendor/cloud.google.com/go/storage/internal/apiv2/version.go
generated
vendored
|
|
@ -1,23 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Code generated by gapicgen. DO NOT EDIT.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import "cloud.google.com/go/storage/internal"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
versionClient = internal.Version
|
|
||||||
}
|
|
||||||
18
vendor/cloud.google.com/go/storage/internal/version.go
generated
vendored
18
vendor/cloud.google.com/go/storage/internal/version.go
generated
vendored
|
|
@ -1,18 +0,0 @@
|
||||||
// Copyright 2022 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package internal
|
|
||||||
|
|
||||||
// Version is the current tagged release of the library.
|
|
||||||
const Version = "1.27.0"
|
|
||||||
146
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
146
vendor/cloud.google.com/go/storage/invoke.go
generated
vendored
|
|
@ -1,146 +0,0 @@
|
||||||
// Copyright 2014 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"cloud.google.com/go/internal"
|
|
||||||
"cloud.google.com/go/internal/version"
|
|
||||||
sinternal "cloud.google.com/go/storage/internal"
|
|
||||||
"github.com/google/uuid"
|
|
||||||
gax "github.com/googleapis/gax-go/v2"
|
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultRetry *retryConfig = &retryConfig{}
|
|
||||||
var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version)
|
|
||||||
|
|
||||||
// run determines whether a retry is necessary based on the config and
|
|
||||||
// idempotency information. It then calls the function with or without retries
|
|
||||||
// as appropriate, using the configured settings.
|
|
||||||
func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error {
|
|
||||||
attempts := 1
|
|
||||||
invocationID := uuid.New().String()
|
|
||||||
|
|
||||||
if retry == nil {
|
|
||||||
retry = defaultRetry
|
|
||||||
}
|
|
||||||
if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever {
|
|
||||||
setHeader(invocationID, attempts)
|
|
||||||
return call()
|
|
||||||
}
|
|
||||||
bo := gax.Backoff{}
|
|
||||||
if retry.backoff != nil {
|
|
||||||
bo.Multiplier = retry.backoff.Multiplier
|
|
||||||
bo.Initial = retry.backoff.Initial
|
|
||||||
bo.Max = retry.backoff.Max
|
|
||||||
}
|
|
||||||
var errorFunc func(err error) bool = ShouldRetry
|
|
||||||
if retry.shouldRetry != nil {
|
|
||||||
errorFunc = retry.shouldRetry
|
|
||||||
}
|
|
||||||
|
|
||||||
return internal.Retry(ctx, bo, func() (stop bool, err error) {
|
|
||||||
setHeader(invocationID, attempts)
|
|
||||||
err = call()
|
|
||||||
attempts++
|
|
||||||
return !errorFunc(err), err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) {
|
|
||||||
return func(invocationID string, attempts int) {
|
|
||||||
if req == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
header := req.Header()
|
|
||||||
invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts)
|
|
||||||
xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ")
|
|
||||||
header.Set("x-goog-api-client", xGoogHeader)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Implement method setting header via context for gRPC
|
|
||||||
func setRetryHeaderGRPC(_ context.Context) func(string, int) {
|
|
||||||
return func(_ string, _ int) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldRetry returns true if an error is retryable, based on best practice
|
|
||||||
// guidance from GCS. See
|
|
||||||
// https://cloud.google.com/storage/docs/retry-strategy#go for more information
|
|
||||||
// on what errors are considered retryable.
|
|
||||||
//
|
|
||||||
// If you would like to customize retryable errors, use the WithErrorFunc to
|
|
||||||
// supply a RetryOption to your library calls. For example, to retry additional
|
|
||||||
// errors, you can write a custom func that wraps ShouldRetry and also specifies
|
|
||||||
// additional errors that should return true.
|
|
||||||
func ShouldRetry(err error) bool {
|
|
||||||
if err == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if errors.Is(err, io.ErrUnexpectedEOF) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
switch e := err.(type) {
|
|
||||||
case *net.OpError:
|
|
||||||
if strings.Contains(e.Error(), "use of closed network connection") {
|
|
||||||
// TODO: check against net.ErrClosed (go 1.16+) instead of string
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
case *googleapi.Error:
|
|
||||||
// Retry on 408, 429, and 5xx, according to
|
|
||||||
// https://cloud.google.com/storage/docs/exponential-backoff.
|
|
||||||
return e.Code == 408 || e.Code == 429 || (e.Code >= 500 && e.Code < 600)
|
|
||||||
case *url.Error:
|
|
||||||
// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
|
|
||||||
// Unfortunately the error type is unexported, so we resort to string
|
|
||||||
// matching.
|
|
||||||
retriable := []string{"connection refused", "connection reset"}
|
|
||||||
for _, s := range retriable {
|
|
||||||
if strings.Contains(e.Error(), s) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case interface{ Temporary() bool }:
|
|
||||||
if e.Temporary() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// HTTP 429, 502, 503, and 504 all map to gRPC UNAVAILABLE per
|
|
||||||
// https://grpc.github.io/grpc/core/md_doc_http-grpc-status-mapping.html.
|
|
||||||
//
|
|
||||||
// This is only necessary for the experimental gRPC-based media operations.
|
|
||||||
if st, ok := status.FromError(err); ok && st.Code() == codes.Unavailable {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Unwrap is only supported in go1.13.x+
|
|
||||||
if e, ok := err.(interface{ Unwrap() error }); ok {
|
|
||||||
return ShouldRetry(e.Unwrap())
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
200
vendor/cloud.google.com/go/storage/notifications.go
generated
vendored
200
vendor/cloud.google.com/go/storage/notifications.go
generated
vendored
|
|
@ -1,200 +0,0 @@
|
||||||
// Copyright 2017 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"cloud.google.com/go/internal/trace"
|
|
||||||
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
|
|
||||||
raw "google.golang.org/api/storage/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Notification describes how to send Cloud PubSub messages when certain
|
|
||||||
// events occur in a bucket.
|
|
||||||
type Notification struct {
|
|
||||||
//The ID of the notification.
|
|
||||||
ID string
|
|
||||||
|
|
||||||
// The ID of the topic to which this subscription publishes.
|
|
||||||
TopicID string
|
|
||||||
|
|
||||||
// The ID of the project to which the topic belongs.
|
|
||||||
TopicProjectID string
|
|
||||||
|
|
||||||
// Only send notifications about listed event types. If empty, send notifications
|
|
||||||
// for all event types.
|
|
||||||
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
|
|
||||||
EventTypes []string
|
|
||||||
|
|
||||||
// If present, only apply this notification configuration to object names that
|
|
||||||
// begin with this prefix.
|
|
||||||
ObjectNamePrefix string
|
|
||||||
|
|
||||||
// An optional list of additional attributes to attach to each Cloud PubSub
|
|
||||||
// message published for this notification subscription.
|
|
||||||
CustomAttributes map[string]string
|
|
||||||
|
|
||||||
// The contents of the message payload.
|
|
||||||
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
|
|
||||||
PayloadFormat string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values for Notification.PayloadFormat.
|
|
||||||
const (
|
|
||||||
// Send no payload with notification messages.
|
|
||||||
NoPayload = "NONE"
|
|
||||||
|
|
||||||
// Send object metadata as JSON with notification messages.
|
|
||||||
JSONPayload = "JSON_API_V1"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Values for Notification.EventTypes.
|
|
||||||
const (
|
|
||||||
// Event that occurs when an object is successfully created.
|
|
||||||
ObjectFinalizeEvent = "OBJECT_FINALIZE"
|
|
||||||
|
|
||||||
// Event that occurs when the metadata of an existing object changes.
|
|
||||||
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
|
|
||||||
|
|
||||||
// Event that occurs when an object is permanently deleted.
|
|
||||||
ObjectDeleteEvent = "OBJECT_DELETE"
|
|
||||||
|
|
||||||
// Event that occurs when the live version of an object becomes an
|
|
||||||
// archived version.
|
|
||||||
ObjectArchiveEvent = "OBJECT_ARCHIVE"
|
|
||||||
)
|
|
||||||
|
|
||||||
func toNotification(rn *raw.Notification) *Notification {
|
|
||||||
n := &Notification{
|
|
||||||
ID: rn.Id,
|
|
||||||
EventTypes: rn.EventTypes,
|
|
||||||
ObjectNamePrefix: rn.ObjectNamePrefix,
|
|
||||||
CustomAttributes: rn.CustomAttributes,
|
|
||||||
PayloadFormat: rn.PayloadFormat,
|
|
||||||
}
|
|
||||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func toNotificationFromProto(pbn *storagepb.Notification) *Notification {
|
|
||||||
n := &Notification{
|
|
||||||
ID: pbn.GetName(),
|
|
||||||
EventTypes: pbn.GetEventTypes(),
|
|
||||||
ObjectNamePrefix: pbn.GetObjectNamePrefix(),
|
|
||||||
CustomAttributes: pbn.GetCustomAttributes(),
|
|
||||||
PayloadFormat: pbn.GetPayloadFormat(),
|
|
||||||
}
|
|
||||||
n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
func toProtoNotification(n *Notification) *storagepb.Notification {
|
|
||||||
return &storagepb.Notification{
|
|
||||||
Name: n.ID,
|
|
||||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
|
||||||
n.TopicProjectID, n.TopicID),
|
|
||||||
EventTypes: n.EventTypes,
|
|
||||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
|
||||||
CustomAttributes: n.CustomAttributes,
|
|
||||||
PayloadFormat: n.PayloadFormat,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
|
|
||||||
|
|
||||||
// parseNotificationTopic extracts the project and topic IDs from from the full
|
|
||||||
// resource name returned by the service. If the name is malformed, it returns
|
|
||||||
// "?" for both IDs.
|
|
||||||
func parseNotificationTopic(nt string) (projectID, topicID string) {
|
|
||||||
matches := topicRE.FindStringSubmatch(nt)
|
|
||||||
if matches == nil {
|
|
||||||
return "?", "?"
|
|
||||||
}
|
|
||||||
return matches[1], matches[2]
|
|
||||||
}
|
|
||||||
|
|
||||||
func toRawNotification(n *Notification) *raw.Notification {
|
|
||||||
return &raw.Notification{
|
|
||||||
Id: n.ID,
|
|
||||||
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
|
|
||||||
n.TopicProjectID, n.TopicID),
|
|
||||||
EventTypes: n.EventTypes,
|
|
||||||
ObjectNamePrefix: n.ObjectNamePrefix,
|
|
||||||
CustomAttributes: n.CustomAttributes,
|
|
||||||
PayloadFormat: string(n.PayloadFormat),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
|
|
||||||
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
|
|
||||||
// returned Notification's ID can be used to refer to it.
|
|
||||||
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
if n.ID != "" {
|
|
||||||
return nil, errors.New("storage: AddNotification: ID must not be set")
|
|
||||||
}
|
|
||||||
if n.TopicProjectID == "" {
|
|
||||||
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
|
|
||||||
}
|
|
||||||
if n.TopicID == "" {
|
|
||||||
return nil, errors.New("storage: AddNotification: missing TopicID")
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := makeStorageOpts(false, b.retry, b.userProject)
|
|
||||||
ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...)
|
|
||||||
return ret, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Notifications returns all the Notifications configured for this bucket, as a map
|
|
||||||
// indexed by notification ID.
|
|
||||||
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
|
||||||
n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
|
|
||||||
m := map[string]*Notification{}
|
|
||||||
for _, rn := range rns {
|
|
||||||
m[rn.Id] = toNotification(rn)
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
func notificationsToMapFromProto(ns []*storagepb.Notification) map[string]*Notification {
|
|
||||||
m := map[string]*Notification{}
|
|
||||||
for _, n := range ns {
|
|
||||||
m[n.Name] = toNotificationFromProto(n)
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteNotification deletes the notification with the given ID.
|
|
||||||
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
opts := makeStorageOpts(true, b.retry, b.userProject)
|
|
||||||
return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
|
|
||||||
}
|
|
||||||
436
vendor/cloud.google.com/go/storage/post_policy_v4.go
generated
vendored
436
vendor/cloud.google.com/go/storage/post_policy_v4.go
generated
vendored
|
|
@ -1,436 +0,0 @@
|
||||||
// Copyright 2020 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/rsa"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PostPolicyV4Options are used to construct a signed post policy.
|
|
||||||
// Please see https://cloud.google.com/storage/docs/xml-api/post-object
|
|
||||||
// for reference about the fields.
|
|
||||||
type PostPolicyV4Options struct {
|
|
||||||
// GoogleAccessID represents the authorizer of the signed URL generation.
|
|
||||||
// It is typically the Google service account client email address from
|
|
||||||
// the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
|
|
||||||
// Required.
|
|
||||||
GoogleAccessID string
|
|
||||||
|
|
||||||
// PrivateKey is the Google service account private key. It is obtainable
|
|
||||||
// from the Google Developers Console.
|
|
||||||
// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
|
|
||||||
// create a service account client ID or reuse one of your existing service account
|
|
||||||
// credentials. Click on the "Generate new P12 key" to generate and download
|
|
||||||
// a new private key. Once you download the P12 file, use the following command
|
|
||||||
// to convert it into a PEM file.
|
|
||||||
//
|
|
||||||
// $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
|
|
||||||
//
|
|
||||||
// Provide the contents of the PEM file as a byte slice.
|
|
||||||
// Exactly one of PrivateKey or SignBytes must be non-nil.
|
|
||||||
PrivateKey []byte
|
|
||||||
|
|
||||||
// SignBytes is a function for implementing custom signing.
|
|
||||||
//
|
|
||||||
// Deprecated: Use SignRawBytes. If both SignBytes and SignRawBytes are defined,
|
|
||||||
// SignBytes will be ignored.
|
|
||||||
// This SignBytes function expects the bytes it receives to be hashed, while
|
|
||||||
// SignRawBytes accepts the raw bytes without hashing, allowing more flexibility.
|
|
||||||
// Add the following to the top of your signing function to hash the bytes
|
|
||||||
// to use SignRawBytes instead:
|
|
||||||
// shaSum := sha256.Sum256(bytes)
|
|
||||||
// bytes = shaSum[:]
|
|
||||||
//
|
|
||||||
SignBytes func(hashBytes []byte) (signature []byte, err error)
|
|
||||||
|
|
||||||
// SignRawBytes is a function for implementing custom signing. For example, if
|
|
||||||
// your application is running on Google App Engine, you can use
|
|
||||||
// appengine's internal signing function:
|
|
||||||
// ctx := appengine.NewContext(request)
|
|
||||||
// acc, _ := appengine.ServiceAccount(ctx)
|
|
||||||
// &PostPolicyV4Options{
|
|
||||||
// GoogleAccessID: acc,
|
|
||||||
// SignRawBytes: func(b []byte) ([]byte, error) {
|
|
||||||
// _, signedBytes, err := appengine.SignBytes(ctx, b)
|
|
||||||
// return signedBytes, err
|
|
||||||
// },
|
|
||||||
// // etc.
|
|
||||||
// })
|
|
||||||
//
|
|
||||||
// SignRawBytes is equivalent to the SignBytes field on SignedURLOptions;
|
|
||||||
// that is, you may use the same signing function for the two.
|
|
||||||
//
|
|
||||||
// Exactly one of PrivateKey or SignRawBytes must be non-nil.
|
|
||||||
SignRawBytes func(bytes []byte) (signature []byte, err error)
|
|
||||||
|
|
||||||
// Expires is the expiration time on the signed URL.
|
|
||||||
// It must be a time in the future.
|
|
||||||
// Required.
|
|
||||||
Expires time.Time
|
|
||||||
|
|
||||||
// Style provides options for the type of URL to use. Options are
|
|
||||||
// PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See
|
|
||||||
// https://cloud.google.com/storage/docs/request-endpoints for details.
|
|
||||||
// Optional.
|
|
||||||
Style URLStyle
|
|
||||||
|
|
||||||
// Insecure when set indicates that the generated URL's scheme
|
|
||||||
// will use "http" instead of "https" (default).
|
|
||||||
// Optional.
|
|
||||||
Insecure bool
|
|
||||||
|
|
||||||
// Fields specifies the attributes of a PostPolicyV4 request.
|
|
||||||
// When Fields is non-nil, its attributes must match those that will
|
|
||||||
// passed into field Conditions.
|
|
||||||
// Optional.
|
|
||||||
Fields *PolicyV4Fields
|
|
||||||
|
|
||||||
// The conditions that the uploaded file will be expected to conform to.
|
|
||||||
// When used, the failure of an upload to satisfy a condition will result in
|
|
||||||
// a 4XX status code, back with the message describing the problem.
|
|
||||||
// Optional.
|
|
||||||
Conditions []PostPolicyV4Condition
|
|
||||||
|
|
||||||
shouldHashSignBytes bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts *PostPolicyV4Options) clone() *PostPolicyV4Options {
|
|
||||||
return &PostPolicyV4Options{
|
|
||||||
GoogleAccessID: opts.GoogleAccessID,
|
|
||||||
PrivateKey: opts.PrivateKey,
|
|
||||||
SignBytes: opts.SignBytes,
|
|
||||||
SignRawBytes: opts.SignRawBytes,
|
|
||||||
Expires: opts.Expires,
|
|
||||||
Style: opts.Style,
|
|
||||||
Insecure: opts.Insecure,
|
|
||||||
Fields: opts.Fields,
|
|
||||||
Conditions: opts.Conditions,
|
|
||||||
shouldHashSignBytes: opts.shouldHashSignBytes,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PolicyV4Fields describes the attributes for a PostPolicyV4 request.
|
|
||||||
type PolicyV4Fields struct {
|
|
||||||
// ACL specifies the access control permissions for the object.
|
|
||||||
// Optional.
|
|
||||||
ACL string
|
|
||||||
// CacheControl specifies the caching directives for the object.
|
|
||||||
// Optional.
|
|
||||||
CacheControl string
|
|
||||||
// ContentType specifies the media type of the object.
|
|
||||||
// Optional.
|
|
||||||
ContentType string
|
|
||||||
// ContentDisposition specifies how the file will be served back to requesters.
|
|
||||||
// Optional.
|
|
||||||
ContentDisposition string
|
|
||||||
// ContentEncoding specifies the decompressive transcoding that the object.
|
|
||||||
// This field is complementary to ContentType in that the file could be
|
|
||||||
// compressed but ContentType specifies the file's original media type.
|
|
||||||
// Optional.
|
|
||||||
ContentEncoding string
|
|
||||||
// Metadata specifies custom metadata for the object.
|
|
||||||
// If any key doesn't begin with "x-goog-meta-", an error will be returned.
|
|
||||||
// Optional.
|
|
||||||
Metadata map[string]string
|
|
||||||
// StatusCodeOnSuccess when set, specifies the status code that Cloud Storage
|
|
||||||
// will serve back on successful upload of the object.
|
|
||||||
// Optional.
|
|
||||||
StatusCodeOnSuccess int
|
|
||||||
// RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage
|
|
||||||
// will serve back on successful upload of the object.
|
|
||||||
// Optional.
|
|
||||||
RedirectToURLOnSuccess string
|
|
||||||
}
|
|
||||||
|
|
||||||
// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request.
|
|
||||||
type PostPolicyV4 struct {
|
|
||||||
// URL is the generated URL that the file upload will be made to.
|
|
||||||
URL string
|
|
||||||
// Fields specifies the generated key-values that the file uploader
|
|
||||||
// must include in their multipart upload form.
|
|
||||||
Fields map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// PostPolicyV4Condition describes the constraints that the subsequent
|
|
||||||
// object upload's multipart form fields will be expected to conform to.
|
|
||||||
type PostPolicyV4Condition interface {
|
|
||||||
isEmpty() bool
|
|
||||||
json.Marshaler
|
|
||||||
}
|
|
||||||
|
|
||||||
type startsWith struct {
|
|
||||||
key, value string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (sw *startsWith) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal([]string{"starts-with", sw.key, sw.value})
|
|
||||||
}
|
|
||||||
func (sw *startsWith) isEmpty() bool {
|
|
||||||
return sw.value == ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConditionStartsWith checks that an attributes starts with value.
|
|
||||||
// An empty value will cause this condition to be ignored.
|
|
||||||
func ConditionStartsWith(key, value string) PostPolicyV4Condition {
|
|
||||||
return &startsWith{key, value}
|
|
||||||
}
|
|
||||||
|
|
||||||
type contentLengthRangeCondition struct {
|
|
||||||
start, end uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end})
|
|
||||||
}
|
|
||||||
func (clr *contentLengthRangeCondition) isEmpty() bool {
|
|
||||||
return clr.start == 0 && clr.end == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type singleValueCondition struct {
|
|
||||||
name, value string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (svc *singleValueCondition) MarshalJSON() ([]byte, error) {
|
|
||||||
return json.Marshal(map[string]string{svc.name: svc.value})
|
|
||||||
}
|
|
||||||
func (svc *singleValueCondition) isEmpty() bool {
|
|
||||||
return svc.value == ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConditionContentLengthRange constraints the limits that the
|
|
||||||
// multipart upload's range header will be expected to be within.
|
|
||||||
func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition {
|
|
||||||
return &contentLengthRangeCondition{start, end}
|
|
||||||
}
|
|
||||||
|
|
||||||
func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition {
|
|
||||||
return &singleValueCondition{"success_action_redirect", redirectURL}
|
|
||||||
}
|
|
||||||
|
|
||||||
func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition {
|
|
||||||
svc := &singleValueCondition{name: "success_action_status"}
|
|
||||||
if statusCode > 0 {
|
|
||||||
svc.value = fmt.Sprintf("%d", statusCode)
|
|
||||||
}
|
|
||||||
return svc
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
|
|
||||||
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
|
|
||||||
// If initializing a Storage Client, instead use the Bucket.GenerateSignedPostPolicyV4
|
|
||||||
// method which uses the Client's credentials to handle authentication.
|
|
||||||
func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
|
|
||||||
if bucket == "" {
|
|
||||||
return nil, errors.New("storage: bucket must be non-empty")
|
|
||||||
}
|
|
||||||
if object == "" {
|
|
||||||
return nil, errors.New("storage: object must be non-empty")
|
|
||||||
}
|
|
||||||
now := utcNow()
|
|
||||||
if err := validatePostPolicyV4Options(opts, now); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var signingFn func(hashedBytes []byte) ([]byte, error)
|
|
||||||
switch {
|
|
||||||
case opts.SignRawBytes != nil:
|
|
||||||
signingFn = opts.SignRawBytes
|
|
||||||
case opts.shouldHashSignBytes:
|
|
||||||
signingFn = opts.SignBytes
|
|
||||||
case len(opts.PrivateKey) != 0:
|
|
||||||
parsedRSAPrivKey, err := parseKey(opts.PrivateKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
signingFn = func(b []byte) ([]byte, error) {
|
|
||||||
sum := sha256.Sum256(b)
|
|
||||||
return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, sum[:])
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
return nil, errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
|
|
||||||
}
|
|
||||||
|
|
||||||
var descFields PolicyV4Fields
|
|
||||||
if opts.Fields != nil {
|
|
||||||
descFields = *opts.Fields
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := validateMetadata(descFields.Metadata); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build the policy.
|
|
||||||
conds := make([]PostPolicyV4Condition, len(opts.Conditions))
|
|
||||||
copy(conds, opts.Conditions)
|
|
||||||
conds = append(conds,
|
|
||||||
// These are ordered lexicographically. Technically the order doesn't matter
|
|
||||||
// for creating the policy, but we use this order to match the
|
|
||||||
// cross-language conformance tests for this feature.
|
|
||||||
&singleValueCondition{"acl", descFields.ACL},
|
|
||||||
&singleValueCondition{"cache-control", descFields.CacheControl},
|
|
||||||
&singleValueCondition{"content-disposition", descFields.ContentDisposition},
|
|
||||||
&singleValueCondition{"content-encoding", descFields.ContentEncoding},
|
|
||||||
&singleValueCondition{"content-type", descFields.ContentType},
|
|
||||||
conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess),
|
|
||||||
conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess),
|
|
||||||
)
|
|
||||||
|
|
||||||
YYYYMMDD := now.Format(yearMonthDay)
|
|
||||||
policyFields := map[string]string{
|
|
||||||
"key": object,
|
|
||||||
"x-goog-date": now.Format(iso8601),
|
|
||||||
"x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request",
|
|
||||||
"x-goog-algorithm": "GOOG4-RSA-SHA256",
|
|
||||||
"acl": descFields.ACL,
|
|
||||||
"cache-control": descFields.CacheControl,
|
|
||||||
"content-disposition": descFields.ContentDisposition,
|
|
||||||
"content-encoding": descFields.ContentEncoding,
|
|
||||||
"content-type": descFields.ContentType,
|
|
||||||
"success_action_redirect": descFields.RedirectToURLOnSuccess,
|
|
||||||
}
|
|
||||||
for key, value := range descFields.Metadata {
|
|
||||||
conds = append(conds, &singleValueCondition{key, value})
|
|
||||||
policyFields[key] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
// Following from the order expected by the conformance test cases,
|
|
||||||
// hence manually inserting these fields in a specific order.
|
|
||||||
conds = append(conds,
|
|
||||||
&singleValueCondition{"bucket", bucket},
|
|
||||||
&singleValueCondition{"key", object},
|
|
||||||
&singleValueCondition{"x-goog-date", now.Format(iso8601)},
|
|
||||||
&singleValueCondition{
|
|
||||||
name: "x-goog-credential",
|
|
||||||
value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request",
|
|
||||||
},
|
|
||||||
&singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"},
|
|
||||||
)
|
|
||||||
|
|
||||||
nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions))
|
|
||||||
for _, cond := range conds {
|
|
||||||
if cond == nil || !cond.isEmpty() {
|
|
||||||
nonEmptyConds = append(nonEmptyConds, cond)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
condsAsJSON, err := json.Marshal(map[string]interface{}{
|
|
||||||
"conditions": nonEmptyConds,
|
|
||||||
"expiration": opts.Expires.Format(time.RFC3339),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON)
|
|
||||||
var signature []byte
|
|
||||||
var signErr error
|
|
||||||
|
|
||||||
if opts.shouldHashSignBytes {
|
|
||||||
// SignBytes expects hashed bytes as input instead of raw bytes, so we hash them
|
|
||||||
shaSum := sha256.Sum256([]byte(b64Policy))
|
|
||||||
signature, signErr = signingFn(shaSum[:])
|
|
||||||
} else {
|
|
||||||
signature, signErr = signingFn([]byte(b64Policy))
|
|
||||||
}
|
|
||||||
if signErr != nil {
|
|
||||||
return nil, signErr
|
|
||||||
}
|
|
||||||
|
|
||||||
policyFields["policy"] = b64Policy
|
|
||||||
policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature)
|
|
||||||
|
|
||||||
// Construct the URL.
|
|
||||||
scheme := "https"
|
|
||||||
if opts.Insecure {
|
|
||||||
scheme = "http"
|
|
||||||
}
|
|
||||||
path := opts.Style.path(bucket, "") + "/"
|
|
||||||
u := &url.URL{
|
|
||||||
Path: path,
|
|
||||||
RawPath: pathEncodeV4(path),
|
|
||||||
Host: opts.Style.host(bucket),
|
|
||||||
Scheme: scheme,
|
|
||||||
}
|
|
||||||
|
|
||||||
if descFields.StatusCodeOnSuccess > 0 {
|
|
||||||
policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear out fields with blanks values.
|
|
||||||
for key, value := range policyFields {
|
|
||||||
if value == "" {
|
|
||||||
delete(policyFields, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pp4 := &PostPolicyV4{
|
|
||||||
Fields: policyFields,
|
|
||||||
URL: u.String(),
|
|
||||||
}
|
|
||||||
return pp4, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validatePostPolicyV4Options checks that:
|
|
||||||
// * GoogleAccessID is set
|
|
||||||
// * either PrivateKey or SignRawBytes/SignBytes is set, but not both
|
|
||||||
// * the deadline set in Expires is not in the past
|
|
||||||
// * if Style is not set, it'll use PathStyle
|
|
||||||
// * sets shouldHashSignBytes to true if opts.SignBytes should be used
|
|
||||||
func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error {
|
|
||||||
if opts == nil || opts.GoogleAccessID == "" {
|
|
||||||
return errors.New("storage: missing required GoogleAccessID")
|
|
||||||
}
|
|
||||||
if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil && opts.SignRawBytes == nil; privBlank == signBlank {
|
|
||||||
return errors.New("storage: exactly one of PrivateKey or SignRawBytes must be set")
|
|
||||||
}
|
|
||||||
if opts.Expires.Before(now) {
|
|
||||||
return errors.New("storage: expecting Expires to be in the future")
|
|
||||||
}
|
|
||||||
if opts.Style == nil {
|
|
||||||
opts.Style = PathStyle()
|
|
||||||
}
|
|
||||||
if opts.SignRawBytes == nil && opts.SignBytes != nil {
|
|
||||||
opts.shouldHashSignBytes = true
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-",
|
|
||||||
// otherwise it will return an error.
|
|
||||||
func validateMetadata(hdrs map[string]string) (err error) {
|
|
||||||
if len(hdrs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
badKeys := make([]string, 0, len(hdrs))
|
|
||||||
for key := range hdrs {
|
|
||||||
if !strings.HasPrefix(key, "x-goog-meta-") {
|
|
||||||
badKeys = append(badKeys, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(badKeys) != 0 {
|
|
||||||
err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", "))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
266
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
266
vendor/cloud.google.com/go/storage/reader.go
generated
vendored
|
|
@ -1,266 +0,0 @@
|
||||||
// Copyright 2016 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"hash/crc32"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"cloud.google.com/go/internal/trace"
|
|
||||||
)
|
|
||||||
|
|
||||||
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
|
|
||||||
|
|
||||||
// ReaderObjectAttrs are attributes about the object being read. These are populated
|
|
||||||
// during the New call. This struct only holds a subset of object attributes: to
|
|
||||||
// get the full set of attributes, use ObjectHandle.Attrs.
|
|
||||||
//
|
|
||||||
// Each field is read-only.
|
|
||||||
type ReaderObjectAttrs struct {
|
|
||||||
// Size is the length of the object's content.
|
|
||||||
Size int64
|
|
||||||
|
|
||||||
// StartOffset is the byte offset within the object
|
|
||||||
// from which reading begins.
|
|
||||||
// This value is only non-zero for range requests.
|
|
||||||
StartOffset int64
|
|
||||||
|
|
||||||
// ContentType is the MIME type of the object's content.
|
|
||||||
ContentType string
|
|
||||||
|
|
||||||
// ContentEncoding is the encoding of the object's content.
|
|
||||||
ContentEncoding string
|
|
||||||
|
|
||||||
// CacheControl specifies whether and for how long browser and Internet
|
|
||||||
// caches are allowed to cache your objects.
|
|
||||||
CacheControl string
|
|
||||||
|
|
||||||
// LastModified is the time that the object was last modified.
|
|
||||||
LastModified time.Time
|
|
||||||
|
|
||||||
// Generation is the generation number of the object's content.
|
|
||||||
Generation int64
|
|
||||||
|
|
||||||
// Metageneration is the version of the metadata for this object at
|
|
||||||
// this generation. This field is used for preconditions and for
|
|
||||||
// detecting changes in metadata. A metageneration number is only
|
|
||||||
// meaningful in the context of a particular generation of a
|
|
||||||
// particular object.
|
|
||||||
Metageneration int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReader creates a new Reader to read the contents of the
|
|
||||||
// object.
|
|
||||||
// ErrObjectNotExist will be returned if the object is not found.
|
|
||||||
//
|
|
||||||
// The caller must call Close on the returned Reader when done reading.
|
|
||||||
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
|
|
||||||
return o.NewRangeReader(ctx, 0, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRangeReader reads part of an object, reading at most length bytes
|
|
||||||
// starting at the given offset. If length is negative, the object is read
|
|
||||||
// until the end. If offset is negative, the object is read abs(offset) bytes
|
|
||||||
// from the end, and length must also be negative to indicate all remaining
|
|
||||||
// bytes will be read.
|
|
||||||
//
|
|
||||||
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
|
|
||||||
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
|
|
||||||
// that file will be served back whole, regardless of the requested range as
|
|
||||||
// Google Cloud Storage dictates.
|
|
||||||
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
|
|
||||||
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
|
|
||||||
defer func() { trace.EndSpan(ctx, err) }()
|
|
||||||
|
|
||||||
if err := o.validate(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if offset < 0 && length >= 0 {
|
|
||||||
return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset)
|
|
||||||
}
|
|
||||||
if o.conds != nil {
|
|
||||||
if err := o.conds.validate("NewRangeReader"); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := makeStorageOpts(true, o.retry, o.userProject)
|
|
||||||
|
|
||||||
params := &newRangeReaderParams{
|
|
||||||
bucket: o.bucket,
|
|
||||||
object: o.object,
|
|
||||||
gen: o.gen,
|
|
||||||
offset: offset,
|
|
||||||
length: length,
|
|
||||||
encryptionKey: o.encryptionKey,
|
|
||||||
conds: o.conds,
|
|
||||||
readCompressed: o.readCompressed,
|
|
||||||
}
|
|
||||||
|
|
||||||
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
|
|
||||||
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// decompressiveTranscoding returns true if the request was served decompressed
|
|
||||||
// and different than its original storage form. This happens when the "Content-Encoding"
|
|
||||||
// header is "gzip".
|
|
||||||
// See:
|
|
||||||
// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
|
|
||||||
// - https://github.com/googleapis/google-cloud-go/issues/1800
|
|
||||||
func decompressiveTranscoding(res *http.Response) bool {
|
|
||||||
// Decompressive Transcoding.
|
|
||||||
return res.Header.Get("Content-Encoding") == "gzip" ||
|
|
||||||
res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip"
|
|
||||||
}
|
|
||||||
|
|
||||||
func uncompressedByServer(res *http.Response) bool {
|
|
||||||
// If the data is stored as gzip but is not encoded as gzip, then it
|
|
||||||
// was uncompressed by the server.
|
|
||||||
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
|
|
||||||
res.Header.Get("Content-Encoding") != "gzip"
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseCRC32c(res *http.Response) (uint32, bool) {
|
|
||||||
const prefix = "crc32c="
|
|
||||||
for _, spec := range res.Header["X-Goog-Hash"] {
|
|
||||||
if strings.HasPrefix(spec, prefix) {
|
|
||||||
c, err := decodeUint32(spec[len(prefix):])
|
|
||||||
if err == nil {
|
|
||||||
return c, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// setConditionsHeaders sets precondition request headers for downloads
|
|
||||||
// using the XML API. It assumes that the conditions have been validated.
|
|
||||||
func setConditionsHeaders(headers http.Header, conds *Conditions) error {
|
|
||||||
if conds == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if conds.MetagenerationMatch != 0 {
|
|
||||||
headers.Set("x-goog-if-metageneration-match", fmt.Sprint(conds.MetagenerationMatch))
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case conds.GenerationMatch != 0:
|
|
||||||
headers.Set("x-goog-if-generation-match", fmt.Sprint(conds.GenerationMatch))
|
|
||||||
case conds.DoesNotExist:
|
|
||||||
headers.Set("x-goog-if-generation-match", "0")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrap a request to look similar to an apiary library request, in order to
|
|
||||||
// be used by run().
|
|
||||||
type readerRequestWrapper struct {
|
|
||||||
req *http.Request
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *readerRequestWrapper) Header() http.Header {
|
|
||||||
return w.req.Header
|
|
||||||
}
|
|
||||||
|
|
||||||
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
|
|
||||||
|
|
||||||
// Reader reads a Cloud Storage object.
|
|
||||||
// It implements io.Reader.
|
|
||||||
//
|
|
||||||
// Typically, a Reader computes the CRC of the downloaded content and compares it to
|
|
||||||
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
|
|
||||||
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
|
|
||||||
type Reader struct {
|
|
||||||
Attrs ReaderObjectAttrs
|
|
||||||
seen, remain, size int64
|
|
||||||
checkCRC bool // should we check the CRC?
|
|
||||||
wantCRC uint32 // the CRC32c value the server sent in the header
|
|
||||||
gotCRC uint32 // running crc
|
|
||||||
|
|
||||||
reader io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the Reader. It must be called when done reading.
|
|
||||||
func (r *Reader) Close() error {
|
|
||||||
return r.reader.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *Reader) Read(p []byte) (int, error) {
|
|
||||||
n, err := r.reader.Read(p)
|
|
||||||
if r.remain != -1 {
|
|
||||||
r.remain -= int64(n)
|
|
||||||
}
|
|
||||||
if r.checkCRC {
|
|
||||||
r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
|
|
||||||
// Check CRC here. It would be natural to check it in Close, but
|
|
||||||
// everybody defers Close on the assumption that it doesn't return
|
|
||||||
// anything worth looking at.
|
|
||||||
if err == io.EOF {
|
|
||||||
if r.gotCRC != r.wantCRC {
|
|
||||||
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
|
|
||||||
r.gotCRC, r.wantCRC)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Size returns the size of the object in bytes.
|
|
||||||
// The returned value is always the same and is not affected by
|
|
||||||
// calls to Read or Close.
|
|
||||||
//
|
|
||||||
// Deprecated: use Reader.Attrs.Size.
|
|
||||||
func (r *Reader) Size() int64 {
|
|
||||||
return r.Attrs.Size
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remain returns the number of bytes left to read, or -1 if unknown.
|
|
||||||
func (r *Reader) Remain() int64 {
|
|
||||||
return r.remain
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContentType returns the content type of the object.
|
|
||||||
//
|
|
||||||
// Deprecated: use Reader.Attrs.ContentType.
|
|
||||||
func (r *Reader) ContentType() string {
|
|
||||||
return r.Attrs.ContentType
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContentEncoding returns the content encoding of the object.
|
|
||||||
//
|
|
||||||
// Deprecated: use Reader.Attrs.ContentEncoding.
|
|
||||||
func (r *Reader) ContentEncoding() string {
|
|
||||||
return r.Attrs.ContentEncoding
|
|
||||||
}
|
|
||||||
|
|
||||||
// CacheControl returns the cache control of the object.
|
|
||||||
//
|
|
||||||
// Deprecated: use Reader.Attrs.CacheControl.
|
|
||||||
func (r *Reader) CacheControl() string {
|
|
||||||
return r.Attrs.CacheControl
|
|
||||||
}
|
|
||||||
|
|
||||||
// LastModified returns the value of the Last-Modified header.
|
|
||||||
//
|
|
||||||
// Deprecated: use Reader.Attrs.LastModified.
|
|
||||||
func (r *Reader) LastModified() (time.Time, error) {
|
|
||||||
return r.Attrs.LastModified, nil
|
|
||||||
}
|
|
||||||
12
vendor/cloud.google.com/go/storage/release-please-config.json
generated
vendored
12
vendor/cloud.google.com/go/storage/release-please-config.json
generated
vendored
|
|
@ -1,12 +0,0 @@
|
||||||
{
|
|
||||||
"release-type": "go-yoshi",
|
|
||||||
"separate-pull-requests": true,
|
|
||||||
"include-component-in-tag": true,
|
|
||||||
"tag-separator": "/",
|
|
||||||
"packages": {
|
|
||||||
"storage": {
|
|
||||||
"component": "storage"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"plugins": ["sentence-case"]
|
|
||||||
}
|
|
||||||
2088
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
2088
vendor/cloud.google.com/go/storage/storage.go
generated
vendored
File diff suppressed because it is too large
Load diff
30067
vendor/cloud.google.com/go/storage/storage.replay
generated
vendored
30067
vendor/cloud.google.com/go/storage/storage.replay
generated
vendored
File diff suppressed because one or more lines are too long
270
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
270
vendor/cloud.google.com/go/storage/writer.go
generated
vendored
|
|
@ -1,270 +0,0 @@
|
||||||
// Copyright 2014 Google LLC
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package storage
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Writer writes a Cloud Storage object.
|
|
||||||
type Writer struct {
|
|
||||||
// ObjectAttrs are optional attributes to set on the object. Any attributes
|
|
||||||
// must be initialized before the first Write call. Nil or zero-valued
|
|
||||||
// attributes are ignored.
|
|
||||||
ObjectAttrs
|
|
||||||
|
|
||||||
// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
|
|
||||||
// to true in addition to setting the Writer's CRC32C field, because zero
|
|
||||||
// is a valid CRC and normally a zero would not be transmitted.
|
|
||||||
// If a CRC32C is sent, and the data written does not match the checksum,
|
|
||||||
// the write will be rejected.
|
|
||||||
//
|
|
||||||
// Note: SendCRC32C must be set to true BEFORE the first call to
|
|
||||||
// Writer.Write() in order to send the checksum. If it is set after that
|
|
||||||
// point, the checksum will be ignored.
|
|
||||||
SendCRC32C bool
|
|
||||||
|
|
||||||
// ChunkSize controls the maximum number of bytes of the object that the
|
|
||||||
// Writer will attempt to send to the server in a single request. Objects
|
|
||||||
// smaller than the size will be sent in a single request, while larger
|
|
||||||
// objects will be split over multiple requests. The value will be rounded up
|
|
||||||
// to the nearest multiple of 256K. The default ChunkSize is 16MiB.
|
|
||||||
//
|
|
||||||
// Each Writer will internally allocate a buffer of size ChunkSize. This is
|
|
||||||
// used to buffer input data and allow for the input to be sent again if a
|
|
||||||
// request must be retried.
|
|
||||||
//
|
|
||||||
// If you upload small objects (< 16MiB), you should set ChunkSize
|
|
||||||
// to a value slightly larger than the objects' sizes to avoid memory bloat.
|
|
||||||
// This is especially important if you are uploading many small objects
|
|
||||||
// concurrently. See
|
|
||||||
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size
|
|
||||||
// for more information about performance trade-offs related to ChunkSize.
|
|
||||||
//
|
|
||||||
// If ChunkSize is set to zero, chunking will be disabled and the object will
|
|
||||||
// be uploaded in a single request without the use of a buffer. This will
|
|
||||||
// further reduce memory used during uploads, but will also prevent the writer
|
|
||||||
// from retrying in case of a transient error from the server or resuming an
|
|
||||||
// upload that fails midway through, since the buffer is required in order to
|
|
||||||
// retry the failed request.
|
|
||||||
//
|
|
||||||
// ChunkSize must be set before the first Write call.
|
|
||||||
ChunkSize int
|
|
||||||
|
|
||||||
// ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk
|
|
||||||
// resumable uploads.
|
|
||||||
//
|
|
||||||
// For uploads of larger files, the Writer will attempt to retry if the
|
|
||||||
// request to upload a particular chunk fails with a transient error.
|
|
||||||
// If a single chunk has been attempting to upload for longer than this
|
|
||||||
// deadline and the request fails, it will no longer be retried, and the error
|
|
||||||
// will be returned to the caller. This is only applicable for files which are
|
|
||||||
// large enough to require a multi-chunk resumable upload. The default value
|
|
||||||
// is 32s. Users may want to pick a longer deadline if they are using larger
|
|
||||||
// values for ChunkSize or if they expect to have a slow or unreliable
|
|
||||||
// internet connection.
|
|
||||||
//
|
|
||||||
// To set a deadline on the entire upload, use context timeout or
|
|
||||||
// cancellation.
|
|
||||||
ChunkRetryDeadline time.Duration
|
|
||||||
|
|
||||||
// ProgressFunc can be used to monitor the progress of a large write.
|
|
||||||
// operation. If ProgressFunc is not nil and writing requires multiple
|
|
||||||
// calls to the underlying service (see
|
|
||||||
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
|
|
||||||
// then ProgressFunc will be invoked after each call with the number of bytes of
|
|
||||||
// content copied so far.
|
|
||||||
//
|
|
||||||
// ProgressFunc should return quickly without blocking.
|
|
||||||
ProgressFunc func(int64)
|
|
||||||
|
|
||||||
ctx context.Context
|
|
||||||
o *ObjectHandle
|
|
||||||
|
|
||||||
opened bool
|
|
||||||
pw *io.PipeWriter
|
|
||||||
|
|
||||||
donec chan struct{} // closed after err and obj are set.
|
|
||||||
obj *ObjectAttrs
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write appends to w. It implements the io.Writer interface.
|
|
||||||
//
|
|
||||||
// Since writes happen asynchronously, Write may return a nil
|
|
||||||
// error even though the write failed (or will fail). Always
|
|
||||||
// use the error returned from Writer.Close to determine if
|
|
||||||
// the upload was successful.
|
|
||||||
//
|
|
||||||
// Writes will be retried on transient errors from the server, unless
|
|
||||||
// Writer.ChunkSize has been set to zero.
|
|
||||||
func (w *Writer) Write(p []byte) (n int, err error) {
|
|
||||||
w.mu.Lock()
|
|
||||||
werr := w.err
|
|
||||||
w.mu.Unlock()
|
|
||||||
if werr != nil {
|
|
||||||
return 0, werr
|
|
||||||
}
|
|
||||||
if !w.opened {
|
|
||||||
if err := w.openWriter(); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
n, err = w.pw.Write(p)
|
|
||||||
if err != nil {
|
|
||||||
w.mu.Lock()
|
|
||||||
werr := w.err
|
|
||||||
w.mu.Unlock()
|
|
||||||
// Preserve existing functionality that when context is canceled, Write will return
|
|
||||||
// context.Canceled instead of "io: read/write on closed pipe". This hides the
|
|
||||||
// pipe implementation detail from users and makes Write seem as though it's an RPC.
|
|
||||||
if errors.Is(werr, context.Canceled) || errors.Is(werr, context.DeadlineExceeded) {
|
|
||||||
return n, werr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close completes the write operation and flushes any buffered data.
|
|
||||||
// If Close doesn't return an error, metadata about the written object
|
|
||||||
// can be retrieved by calling Attrs.
|
|
||||||
func (w *Writer) Close() error {
|
|
||||||
if !w.opened {
|
|
||||||
if err := w.openWriter(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Closing either the read or write causes the entire pipe to close.
|
|
||||||
if err := w.pw.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
<-w.donec
|
|
||||||
w.mu.Lock()
|
|
||||||
defer w.mu.Unlock()
|
|
||||||
return w.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) openWriter() (err error) {
|
|
||||||
if err := w.validateWriteAttrs(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if w.o.gen != defaultGen {
|
|
||||||
return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
|
|
||||||
}
|
|
||||||
|
|
||||||
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
|
|
||||||
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
|
|
||||||
go w.monitorCancel()
|
|
||||||
params := &openWriterParams{
|
|
||||||
ctx: w.ctx,
|
|
||||||
chunkSize: w.ChunkSize,
|
|
||||||
chunkRetryDeadline: w.ChunkRetryDeadline,
|
|
||||||
bucket: w.o.bucket,
|
|
||||||
attrs: &w.ObjectAttrs,
|
|
||||||
conds: w.o.conds,
|
|
||||||
encryptionKey: w.o.encryptionKey,
|
|
||||||
sendCRC32C: w.SendCRC32C,
|
|
||||||
donec: w.donec,
|
|
||||||
setError: w.error,
|
|
||||||
progress: w.progress,
|
|
||||||
setObj: func(o *ObjectAttrs) { w.obj = o },
|
|
||||||
}
|
|
||||||
w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
w.opened = true
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// monitorCancel is intended to be used as a background goroutine. It monitors the
|
|
||||||
// context, and when it observes that the context has been canceled, it manually
|
|
||||||
// closes things that do not take a context.
|
|
||||||
func (w *Writer) monitorCancel() {
|
|
||||||
select {
|
|
||||||
case <-w.ctx.Done():
|
|
||||||
w.mu.Lock()
|
|
||||||
werr := w.ctx.Err()
|
|
||||||
w.err = werr
|
|
||||||
w.mu.Unlock()
|
|
||||||
|
|
||||||
// Closing either the read or write causes the entire pipe to close.
|
|
||||||
w.CloseWithError(werr)
|
|
||||||
case <-w.donec:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseWithError aborts the write operation with the provided error.
|
|
||||||
// CloseWithError always returns nil.
|
|
||||||
//
|
|
||||||
// Deprecated: cancel the context passed to NewWriter instead.
|
|
||||||
func (w *Writer) CloseWithError(err error) error {
|
|
||||||
if !w.opened {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return w.pw.CloseWithError(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attrs returns metadata about a successfully-written object.
|
|
||||||
// It's only valid to call it after Close returns nil.
|
|
||||||
func (w *Writer) Attrs() *ObjectAttrs {
|
|
||||||
return w.obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *Writer) validateWriteAttrs() error {
|
|
||||||
attrs := w.ObjectAttrs
|
|
||||||
// Check the developer didn't change the object Name (this is unfortunate, but
|
|
||||||
// we don't want to store an object under the wrong name).
|
|
||||||
if attrs.Name != w.o.object {
|
|
||||||
return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
|
|
||||||
}
|
|
||||||
if !utf8.ValidString(attrs.Name) {
|
|
||||||
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
|
|
||||||
}
|
|
||||||
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
|
|
||||||
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
|
|
||||||
}
|
|
||||||
if w.ChunkSize < 0 {
|
|
||||||
return errors.New("storage: Writer.ChunkSize must be non-negative")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// progress is a convenience wrapper that reports write progress to the Writer
|
|
||||||
// ProgressFunc if it is set and progress is non-zero.
|
|
||||||
func (w *Writer) progress(p int64) {
|
|
||||||
if w.ProgressFunc != nil && p != 0 {
|
|
||||||
w.ProgressFunc(p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// error acquires the Writer's lock, sets the Writer's err to the given error,
|
|
||||||
// then relinquishes the lock.
|
|
||||||
func (w *Writer) error(err error) {
|
|
||||||
w.mu.Lock()
|
|
||||||
w.err = err
|
|
||||||
w.mu.Unlock()
|
|
||||||
}
|
|
||||||
21
vendor/github.com/Azure/azure-pipeline-go/LICENSE
generated
vendored
21
vendor/github.com/Azure/azure-pipeline-go/LICENSE
generated
vendored
|
|
@ -1,21 +0,0 @@
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE
|
|
||||||
284
vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go
generated
vendored
284
vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go
generated
vendored
|
|
@ -1,284 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"github.com/mattn/go-ieproxy"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The Factory interface represents an object that can create its Policy object. Each HTTP request sent
|
|
||||||
// requires that this Factory create a new instance of its Policy object.
|
|
||||||
type Factory interface {
|
|
||||||
New(next Policy, po *PolicyOptions) Policy
|
|
||||||
}
|
|
||||||
|
|
||||||
// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface.
|
|
||||||
type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc
|
|
||||||
|
|
||||||
// New calls f(next,po).
|
|
||||||
func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy {
|
|
||||||
return f(next, po)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process
|
|
||||||
// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned
|
|
||||||
// Response goes backward through the linked-list for additional processing.
|
|
||||||
// NOTE: Request is passed by value so changes do not change the caller's version of
|
|
||||||
// the request. However, Request has some fields that reference mutable objects (not strings).
|
|
||||||
// These references are copied; a deep copy is not performed. Specifically, this means that
|
|
||||||
// you should avoid modifying the objects referred to by these fields: URL, Header, Body,
|
|
||||||
// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response.
|
|
||||||
type Policy interface {
|
|
||||||
Do(ctx context.Context, request Request) (Response, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface.
|
|
||||||
type PolicyFunc func(ctx context.Context, request Request) (Response, error)
|
|
||||||
|
|
||||||
// Do calls f(ctx, request).
|
|
||||||
func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) {
|
|
||||||
return f(ctx, request)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Options configures a Pipeline's behavior.
|
|
||||||
type Options struct {
|
|
||||||
HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests.
|
|
||||||
Log LogOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogLevel tells a logger the minimum level to log. When code reports a log entry,
|
|
||||||
// the LogLevel indicates the level of the log entry. The logger only records entries
|
|
||||||
// whose level is at least the level it was told to log. See the Log* constants.
|
|
||||||
// For example, if a logger is configured with LogError, then LogError, LogPanic,
|
|
||||||
// and LogFatal entries will be logged; lower level entries are ignored.
|
|
||||||
type LogLevel uint32
|
|
||||||
|
|
||||||
const (
|
|
||||||
// LogNone tells a logger not to log any entries passed to it.
|
|
||||||
LogNone LogLevel = iota
|
|
||||||
|
|
||||||
// LogFatal tells a logger to log all LogFatal entries passed to it.
|
|
||||||
LogFatal
|
|
||||||
|
|
||||||
// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
|
|
||||||
LogPanic
|
|
||||||
|
|
||||||
// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
|
|
||||||
LogError
|
|
||||||
|
|
||||||
// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
|
||||||
LogWarning
|
|
||||||
|
|
||||||
// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
|
||||||
LogInfo
|
|
||||||
|
|
||||||
// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
|
|
||||||
LogDebug
|
|
||||||
)
|
|
||||||
|
|
||||||
// LogOptions configures the pipeline's logging mechanism & level filtering.
|
|
||||||
type LogOptions struct {
|
|
||||||
Log func(level LogLevel, message string)
|
|
||||||
|
|
||||||
// ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not.
|
|
||||||
// An application can return different values over the its lifetime; this allows the application to dynamically
|
|
||||||
// alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure
|
|
||||||
// you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone).
|
|
||||||
// Usually, the function will be implemented simply like this: return level <= LogWarning
|
|
||||||
ShouldLog func(level LogLevel) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type pipeline struct {
|
|
||||||
factories []Factory
|
|
||||||
options Options
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface.
|
|
||||||
// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest
|
|
||||||
// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a
|
|
||||||
// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where
|
|
||||||
// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects.
|
|
||||||
//
|
|
||||||
// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list.
|
|
||||||
// THen, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network
|
|
||||||
// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects.
|
|
||||||
// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests.
|
|
||||||
type Pipeline interface {
|
|
||||||
Do(ctx context.Context, methodFactory Factory, request Request) (Response, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options.
|
|
||||||
func NewPipeline(factories []Factory, o Options) Pipeline {
|
|
||||||
if o.HTTPSender == nil {
|
|
||||||
o.HTTPSender = newDefaultHTTPClientFactory()
|
|
||||||
}
|
|
||||||
if o.Log.Log == nil {
|
|
||||||
o.Log.Log = func(LogLevel, string) {} // No-op logger
|
|
||||||
}
|
|
||||||
return &pipeline{factories: factories, options: o}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object
|
|
||||||
// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request
|
|
||||||
// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and
|
|
||||||
// ultimately sends the transformed HTTP request over the network.
|
|
||||||
func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) {
|
|
||||||
response, err := p.newPolicies(methodFactory).Do(ctx, request)
|
|
||||||
request.close()
|
|
||||||
return response, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *pipeline) newPolicies(methodFactory Factory) Policy {
|
|
||||||
// The last Policy is the one that actually sends the request over the wire and gets the response.
|
|
||||||
// It is overridable via the Options' HTTPSender field.
|
|
||||||
po := &PolicyOptions{pipeline: p} // One object shared by all policy objects
|
|
||||||
next := p.options.HTTPSender.New(nil, po)
|
|
||||||
|
|
||||||
// Walk over the slice of Factory objects in reverse (from wire to API)
|
|
||||||
markers := 0
|
|
||||||
for i := len(p.factories) - 1; i >= 0; i-- {
|
|
||||||
factory := p.factories[i]
|
|
||||||
if _, ok := factory.(methodFactoryMarker); ok {
|
|
||||||
markers++
|
|
||||||
if markers > 1 {
|
|
||||||
panic("MethodFactoryMarker can only appear once in the pipeline")
|
|
||||||
}
|
|
||||||
if methodFactory != nil {
|
|
||||||
// Replace MethodFactoryMarker with passed-in methodFactory
|
|
||||||
next = methodFactory.New(next, po)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Use the slice's Factory to construct its Policy
|
|
||||||
next = factory.New(next, po)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Each Factory has created its Policy
|
|
||||||
if markers == 0 && methodFactory != nil {
|
|
||||||
panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline")
|
|
||||||
}
|
|
||||||
return next // Return head of the Policy object linked-list
|
|
||||||
}
|
|
||||||
|
|
||||||
// A PolicyOptions represents optional information that can be used by a node in the
|
|
||||||
// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method
|
|
||||||
// which passes it (if desired) to the Policy object it creates. Today, the Policy object
|
|
||||||
// uses the options to perform logging. But, in the future, this could be used for more.
|
|
||||||
type PolicyOptions struct {
|
|
||||||
pipeline *pipeline
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShouldLog returns true if the specified log level should be logged.
|
|
||||||
func (po *PolicyOptions) ShouldLog(level LogLevel) bool {
|
|
||||||
if po.pipeline.options.Log.ShouldLog != nil {
|
|
||||||
return po.pipeline.options.Log.ShouldLog(level)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Log logs a string to the Pipeline's Logger.
|
|
||||||
func (po *PolicyOptions) Log(level LogLevel, msg string) {
|
|
||||||
if !po.ShouldLog(level) {
|
|
||||||
return // Short circuit message formatting if we're not logging it
|
|
||||||
}
|
|
||||||
|
|
||||||
// We are logging it, ensure trailing newline
|
|
||||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
|
||||||
msg += "\n" // Ensure trailing newline
|
|
||||||
}
|
|
||||||
po.pipeline.options.Log.Log(level, msg)
|
|
||||||
|
|
||||||
// If logger doesn't handle fatal/panic, we'll do it here.
|
|
||||||
if level == LogFatal {
|
|
||||||
os.Exit(1)
|
|
||||||
} else if level == LogPanic {
|
|
||||||
panic(msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var pipelineHTTPClient = newDefaultHTTPClient()
|
|
||||||
|
|
||||||
func newDefaultHTTPClient() *http.Client {
|
|
||||||
// We want the Transport to have a large connection pool
|
|
||||||
return &http.Client{
|
|
||||||
Transport: &http.Transport{
|
|
||||||
Proxy: ieproxy.GetProxyFunc(),
|
|
||||||
// We use Dial instead of DialContext as DialContext has been reported to cause slower performance.
|
|
||||||
Dial /*Context*/ : (&net.Dialer{
|
|
||||||
Timeout: 30 * time.Second,
|
|
||||||
KeepAlive: 30 * time.Second,
|
|
||||||
DualStack: true,
|
|
||||||
}).Dial, /*Context*/
|
|
||||||
MaxIdleConns: 0, // No limit
|
|
||||||
MaxIdleConnsPerHost: 100,
|
|
||||||
IdleConnTimeout: 90 * time.Second,
|
|
||||||
TLSHandshakeTimeout: 10 * time.Second,
|
|
||||||
ExpectContinueTimeout: 1 * time.Second,
|
|
||||||
DisableKeepAlives: false,
|
|
||||||
DisableCompression: false,
|
|
||||||
MaxResponseHeaderBytes: 0,
|
|
||||||
//ResponseHeaderTimeout: time.Duration{},
|
|
||||||
//ExpectContinueTimeout: time.Duration{},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to a Go's default http.Client.
|
|
||||||
func newDefaultHTTPClientFactory() Factory {
|
|
||||||
return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc {
|
|
||||||
return func(ctx context.Context, request Request) (Response, error) {
|
|
||||||
r, err := pipelineHTTPClient.Do(request.WithContext(ctx))
|
|
||||||
if err != nil {
|
|
||||||
err = NewError(err, "HTTP request failed")
|
|
||||||
}
|
|
||||||
return NewHTTPResponse(r), err
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
var mfm = methodFactoryMarker{} // Singleton
|
|
||||||
|
|
||||||
// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any
|
|
||||||
// MethodMarkerFactory object is replaced with the specified methodFactory object. If nil is passed fro Do's
|
|
||||||
// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created.
|
|
||||||
func MethodFactoryMarker() Factory {
|
|
||||||
return mfm
|
|
||||||
}
|
|
||||||
|
|
||||||
type methodFactoryMarker struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy {
|
|
||||||
panic("methodFactoryMarker policy should have been replaced with a method policy")
|
|
||||||
}
|
|
||||||
|
|
||||||
// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog
|
|
||||||
// By default no implemetation is provided here, because pipeline may be used in many different
|
|
||||||
// contexts, so the correct implementation is context-dependent
|
|
||||||
type LogSanitizer interface {
|
|
||||||
SanitizeLogMessage(raw string) string
|
|
||||||
}
|
|
||||||
|
|
||||||
var sanitizer LogSanitizer
|
|
||||||
var enableForceLog bool = true
|
|
||||||
|
|
||||||
// SetLogSanitizer can be called to supply a custom LogSanitizer.
|
|
||||||
// There is no threadsafety or locking on the underlying variable,
|
|
||||||
// so call this function just once at startup of your application
|
|
||||||
// (Don't later try to change the sanitizer on the fly).
|
|
||||||
func SetLogSanitizer(s LogSanitizer)(){
|
|
||||||
sanitizer = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetForceLogEnabled can be used to disable ForceLog
|
|
||||||
// There is no threadsafety or locking on the underlying variable,
|
|
||||||
// so call this function just once at startup of your application
|
|
||||||
// (Don't later try to change the setting on the fly).
|
|
||||||
func SetForceLogEnabled(enable bool)() {
|
|
||||||
enableForceLog = enable
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
14
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
generated
vendored
14
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go
generated
vendored
|
|
@ -1,14 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
|
|
||||||
// ForceLog should rarely be used. It forceable logs an entry to the
|
|
||||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
|
||||||
func ForceLog(level LogLevel, msg string) {
|
|
||||||
if !enableForceLog {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if sanitizer != nil {
|
|
||||||
msg = sanitizer.SanitizeLogMessage(msg)
|
|
||||||
}
|
|
||||||
forceLog(level, msg)
|
|
||||||
}
|
|
||||||
33
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
generated
vendored
33
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go
generated
vendored
|
|
@ -1,33 +0,0 @@
|
||||||
// +build !windows,!nacl,!plan9
|
|
||||||
|
|
||||||
package pipeline
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"log/syslog"
|
|
||||||
)
|
|
||||||
|
|
||||||
// forceLog should rarely be used. It forceable logs an entry to the
|
|
||||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
|
||||||
func forceLog(level LogLevel, msg string) {
|
|
||||||
if defaultLogger == nil {
|
|
||||||
return // Return fast if we failed to create the logger.
|
|
||||||
}
|
|
||||||
// We are logging it, ensure trailing newline
|
|
||||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
|
||||||
msg += "\n" // Ensure trailing newline
|
|
||||||
}
|
|
||||||
switch level {
|
|
||||||
case LogFatal:
|
|
||||||
defaultLogger.Fatal(msg)
|
|
||||||
case LogPanic:
|
|
||||||
defaultLogger.Panic(msg)
|
|
||||||
case LogError, LogWarning, LogInfo:
|
|
||||||
defaultLogger.Print(msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var defaultLogger = func() *log.Logger {
|
|
||||||
l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags)
|
|
||||||
return l
|
|
||||||
}()
|
|
||||||
61
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
generated
vendored
61
vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go
generated
vendored
|
|
@ -1,61 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// forceLog should rarely be used. It forceable logs an entry to the
|
|
||||||
// Windows Event Log (on Windows) or to the SysLog (on Linux)
|
|
||||||
func forceLog(level LogLevel, msg string) {
|
|
||||||
var el eventType
|
|
||||||
switch level {
|
|
||||||
case LogError, LogFatal, LogPanic:
|
|
||||||
el = elError
|
|
||||||
case LogWarning:
|
|
||||||
el = elWarning
|
|
||||||
case LogInfo:
|
|
||||||
el = elInfo
|
|
||||||
}
|
|
||||||
// We are logging it, ensure trailing newline
|
|
||||||
if len(msg) == 0 || msg[len(msg)-1] != '\n' {
|
|
||||||
msg += "\n" // Ensure trailing newline
|
|
||||||
}
|
|
||||||
reportEvent(el, 0, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
type eventType int16
|
|
||||||
|
|
||||||
const (
|
|
||||||
elSuccess eventType = 0
|
|
||||||
elError eventType = 1
|
|
||||||
elWarning eventType = 2
|
|
||||||
elInfo eventType = 4
|
|
||||||
)
|
|
||||||
|
|
||||||
var reportEvent = func() func(eventType eventType, eventID int32, msg string) {
|
|
||||||
advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration
|
|
||||||
registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW")
|
|
||||||
|
|
||||||
sourceName, _ := os.Executable()
|
|
||||||
sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName)
|
|
||||||
handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16)))
|
|
||||||
if lastErr == nil { // On error, logging is a no-op
|
|
||||||
return func(eventType eventType, eventID int32, msg string) {}
|
|
||||||
}
|
|
||||||
reportEvent := advAPI32.MustFindProc("ReportEventW")
|
|
||||||
return func(eventType eventType, eventID int32, msg string) {
|
|
||||||
s, _ := syscall.UTF16PtrFromString(msg)
|
|
||||||
_, _, _ = reportEvent.Call(
|
|
||||||
uintptr(handle), // HANDLE hEventLog
|
|
||||||
uintptr(eventType), // WORD wType
|
|
||||||
uintptr(0), // WORD wCategory
|
|
||||||
uintptr(eventID), // DWORD dwEventID
|
|
||||||
uintptr(0), // PSID lpUserSid
|
|
||||||
uintptr(1), // WORD wNumStrings
|
|
||||||
uintptr(0), // DWORD dwDataSize
|
|
||||||
uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings
|
|
||||||
uintptr(0)) // LPVOID lpRawData
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
161
vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
generated
vendored
161
vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go
generated
vendored
|
|
@ -1,161 +0,0 @@
|
||||||
// Copyright 2017 Microsoft Corporation. All rights reserved.
|
|
||||||
// Use of this source code is governed by an MIT
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package pipeline implements an HTTP request/response middleware pipeline whose
|
|
||||||
policy objects mutate an HTTP request's URL, query parameters, and/or headers before
|
|
||||||
the request is sent over the wire.
|
|
||||||
|
|
||||||
Not all policy objects mutate an HTTP request; some policy objects simply impact the
|
|
||||||
flow of requests/responses by performing operations such as logging, retry policies,
|
|
||||||
timeouts, failure injection, and deserialization of response payloads.
|
|
||||||
|
|
||||||
Implementing the Policy Interface
|
|
||||||
|
|
||||||
To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do
|
|
||||||
method is called when an HTTP request wants to be sent over the network. Your Do method can perform any
|
|
||||||
operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query
|
|
||||||
parameters, inject a failure, etc. Your Do method must then forward the HTTP request to next Policy object
|
|
||||||
in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy
|
|
||||||
object sends the HTTP request over the network (by calling the HTTPSender's Do method).
|
|
||||||
|
|
||||||
When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response
|
|
||||||
(in reverse order). The Policy object can log the response, retry the operation if due to a transient failure
|
|
||||||
or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response
|
|
||||||
to the code that initiated the original HTTP request.
|
|
||||||
|
|
||||||
Here is a template for how to define a pipeline.Policy object:
|
|
||||||
|
|
||||||
type myPolicy struct {
|
|
||||||
node PolicyNode
|
|
||||||
// TODO: Add configuration/setting fields here (if desired)...
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
|
||||||
// TODO: Mutate/process the HTTP request here...
|
|
||||||
response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response
|
|
||||||
// TODO: Mutate/process the HTTP response here...
|
|
||||||
return response, err // Return response/error to previous Policy
|
|
||||||
}
|
|
||||||
|
|
||||||
Implementing the Factory Interface
|
|
||||||
|
|
||||||
Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New
|
|
||||||
method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is
|
|
||||||
passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and
|
|
||||||
a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object
|
|
||||||
passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object.
|
|
||||||
|
|
||||||
Here is a template for how to define a pipeline.Factory object:
|
|
||||||
|
|
||||||
// NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable);
|
|
||||||
// this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently.
|
|
||||||
type myPolicyFactory struct {
|
|
||||||
// TODO: Add any configuration/setting fields if desired...
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy {
|
|
||||||
return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)...
|
|
||||||
}
|
|
||||||
|
|
||||||
Using your Factory and Policy objects via a Pipeline
|
|
||||||
|
|
||||||
To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes
|
|
||||||
this slice to the pipeline.NewPipeline function.
|
|
||||||
|
|
||||||
func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline
|
|
||||||
|
|
||||||
This function also requires an object implementing the HTTPSender interface. For simple scenarios,
|
|
||||||
passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually
|
|
||||||
send the HTTP request over the network. For more advanced scenarios, you can pass your own HTTPSender
|
|
||||||
object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects
|
|
||||||
or other objects that can simulate the network requests for testing purposes.
|
|
||||||
|
|
||||||
Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple
|
|
||||||
wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a
|
|
||||||
context.Context for cancelling the HTTP request (if desired).
|
|
||||||
|
|
||||||
type Pipeline interface {
|
|
||||||
Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
Do iterates over the slice of Factory objects and tells each one to create its corresponding
|
|
||||||
Policy object. After the linked-list of Policy objects have been created, Do calls the first
|
|
||||||
Policy object passing it the Context & HTTP request parameters. These parameters now flow through
|
|
||||||
all the Policy objects giving each object a chance to look at and/or mutate the HTTP request.
|
|
||||||
The last Policy object sends the message over the network.
|
|
||||||
|
|
||||||
When the network operation completes, the HTTP response and error return values pass
|
|
||||||
back through the same Policy objects in reverse order. Most Policy objects ignore the
|
|
||||||
response/error but some log the result, retry the operation (depending on the exact
|
|
||||||
reason the operation failed), or deserialize the response's body. Your own Policy
|
|
||||||
objects can do whatever they like when processing outgoing requests or incoming responses.
|
|
||||||
|
|
||||||
Note that after an I/O request runs to completion, the Policy objects for that request
|
|
||||||
are garbage collected. However, Pipeline object (like Factory objects) are goroutine-safe allowing
|
|
||||||
them to be created once and reused over many I/O operations. This allows for efficient use of
|
|
||||||
memory and also makes them safely usable by multiple goroutines concurrently.
|
|
||||||
|
|
||||||
Inserting a Method-Specific Factory into the Linked-List of Policy Objects
|
|
||||||
|
|
||||||
While Pipeline and Factory objects can be reused over many different operations, it is
|
|
||||||
common to have special behavior for a specific operation/method. For example, a method
|
|
||||||
may need to deserialize the response's body to an instance of a specific data type.
|
|
||||||
To accommodate this, the Pipeline's Do method takes an additional method-specific
|
|
||||||
Factory object. The Do method tells this Factory to create a Policy object and
|
|
||||||
injects this method-specific Policy object into the linked-list of Policy objects.
|
|
||||||
|
|
||||||
When creating a Pipeline object, the slice of Factory objects passed must have 1
|
|
||||||
(and only 1) entry marking where the method-specific Factory should be injected.
|
|
||||||
The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function:
|
|
||||||
|
|
||||||
func MethodFactoryMarker() pipeline.Factory
|
|
||||||
|
|
||||||
Creating an HTTP Request Object
|
|
||||||
|
|
||||||
The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct.
|
|
||||||
Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard
|
|
||||||
http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function:
|
|
||||||
|
|
||||||
func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error)
|
|
||||||
|
|
||||||
To this function, you must pass a pipeline.RequestOptions that looks like this:
|
|
||||||
|
|
||||||
type RequestOptions struct {
|
|
||||||
// The readable and seekable stream to be sent to the server as the request's body.
|
|
||||||
Body io.ReadSeeker
|
|
||||||
|
|
||||||
// The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request.
|
|
||||||
Progress ProgressReceiver
|
|
||||||
}
|
|
||||||
|
|
||||||
The method and struct ensure that the request's body stream is a read/seekable stream.
|
|
||||||
A seekable stream is required so that upon retry, the final Policy object can seek
|
|
||||||
the stream back to the beginning before retrying the network request and re-uploading the
|
|
||||||
body. In addition, you can associate a ProgressReceiver callback function which will be
|
|
||||||
invoked periodically to report progress while bytes are being read from the body stream
|
|
||||||
and sent over the network.
|
|
||||||
|
|
||||||
Processing the HTTP Response
|
|
||||||
|
|
||||||
When an HTTP response comes in from the network, a reference to Go's http.Response struct is
|
|
||||||
embedded in a struct that implements the pipeline.Response interface:
|
|
||||||
|
|
||||||
type Response interface {
|
|
||||||
Response() *http.Response
|
|
||||||
}
|
|
||||||
|
|
||||||
This interface is returned through all the Policy objects. Each Policy object can call the Response
|
|
||||||
interface's Response method to examine (or mutate) the embedded http.Response object.
|
|
||||||
|
|
||||||
A Policy object can internally define another struct (implementing the pipeline.Response interface)
|
|
||||||
that embeds an http.Response and adds additional fields and return this structure to other Policy
|
|
||||||
objects. This allows a Policy object to deserialize the body to some other struct and return the
|
|
||||||
original http.Response and the additional struct back through the Policy chain. Other Policy objects
|
|
||||||
can see the Response but cannot see the additional struct with the deserialized body. After all the
|
|
||||||
Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method.
|
|
||||||
The caller of this method can perform a type assertion attempting to get back to the struct type
|
|
||||||
really returned by the Policy object. If the type assertion is successful, the caller now has
|
|
||||||
access to both the http.Response and the deserialized struct object.*/
|
|
||||||
package pipeline
|
|
||||||
184
vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
generated
vendored
184
vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go
generated
vendored
|
|
@ -1,184 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
type causer interface {
|
|
||||||
Cause() error
|
|
||||||
}
|
|
||||||
|
|
||||||
func errorWithPC(msg string, pc uintptr) string {
|
|
||||||
s := ""
|
|
||||||
if fn := runtime.FuncForPC(pc); fn != nil {
|
|
||||||
file, line := fn.FileLine(pc)
|
|
||||||
s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line)
|
|
||||||
}
|
|
||||||
s += msg + "\n\n"
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func getPC(callersToSkip int) uintptr {
|
|
||||||
// Get the PC of Initialize method's caller.
|
|
||||||
pc := [1]uintptr{}
|
|
||||||
_ = runtime.Callers(callersToSkip, pc[:])
|
|
||||||
return pc[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorNode can be an embedded field in a private error object. This field
|
|
||||||
// adds Program Counter support and a 'cause' (reference to a preceding error).
|
|
||||||
// When initializing a error type with this embedded field, initialize the
|
|
||||||
// ErrorNode field by calling ErrorNode{}.Initialize(cause).
|
|
||||||
type ErrorNode struct {
|
|
||||||
pc uintptr // Represents a Program Counter that you can get symbols for.
|
|
||||||
cause error // Refers to the preceding error (or nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
|
||||||
// When defining a new error type, have its Error method call this one passing
|
|
||||||
// it the string representation of the error.
|
|
||||||
func (e *ErrorNode) Error(msg string) string {
|
|
||||||
s := errorWithPC(msg, e.pc)
|
|
||||||
if e.cause != nil {
|
|
||||||
s += e.cause.Error() + "\n"
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cause returns the error that preceded this error.
|
|
||||||
func (e *ErrorNode) Cause() error { return e.cause }
|
|
||||||
|
|
||||||
// Unwrap provides compatibility for Go 1.13 error chains.
|
|
||||||
func (e *ErrorNode) Unwrap() error { return e.cause }
|
|
||||||
|
|
||||||
// Temporary returns true if the error occurred due to a temporary condition.
|
|
||||||
func (e ErrorNode) Temporary() bool {
|
|
||||||
type temporary interface {
|
|
||||||
Temporary() bool
|
|
||||||
}
|
|
||||||
|
|
||||||
for err := e.cause; err != nil; {
|
|
||||||
if t, ok := err.(temporary); ok {
|
|
||||||
return t.Temporary()
|
|
||||||
}
|
|
||||||
|
|
||||||
if cause, ok := err.(causer); ok {
|
|
||||||
err = cause.Cause()
|
|
||||||
} else {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Timeout returns true if the error occurred due to time expiring.
|
|
||||||
func (e ErrorNode) Timeout() bool {
|
|
||||||
type timeout interface {
|
|
||||||
Timeout() bool
|
|
||||||
}
|
|
||||||
|
|
||||||
for err := e.cause; err != nil; {
|
|
||||||
if t, ok := err.(timeout); ok {
|
|
||||||
return t.Timeout()
|
|
||||||
}
|
|
||||||
|
|
||||||
if cause, ok := err.(causer); ok {
|
|
||||||
err = cause.Cause()
|
|
||||||
} else {
|
|
||||||
err = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize is used to initialize an embedded ErrorNode field.
|
|
||||||
// It captures the caller's program counter and saves the cause (preceding error).
|
|
||||||
// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
|
|
||||||
// value of 3 is very common; but, depending on your code nesting, you may need
|
|
||||||
// a different value.
|
|
||||||
func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
|
|
||||||
pc := getPC(callersToSkip)
|
|
||||||
return ErrorNode{pc: pc, cause: cause}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cause walks all the preceding errors and return the originating error.
|
|
||||||
func Cause(err error) error {
|
|
||||||
for err != nil {
|
|
||||||
cause, ok := err.(causer)
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err = cause.Cause()
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorNodeNoCause can be an embedded field in a private error object. This field
|
|
||||||
// adds Program Counter support.
|
|
||||||
// When initializing a error type with this embedded field, initialize the
|
|
||||||
// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
|
|
||||||
type ErrorNodeNoCause struct {
|
|
||||||
pc uintptr // Represents a Program Counter that you can get symbols for.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error returns a string with the PC's symbols or "" if the PC is invalid.
|
|
||||||
// When defining a new error type, have its Error method call this one passing
|
|
||||||
// it the string representation of the error.
|
|
||||||
func (e *ErrorNodeNoCause) Error(msg string) string {
|
|
||||||
return errorWithPC(msg, e.pc)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Temporary returns true if the error occurred due to a temporary condition.
|
|
||||||
func (e ErrorNodeNoCause) Temporary() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Timeout returns true if the error occurred due to time expiring.
|
|
||||||
func (e ErrorNodeNoCause) Timeout() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize is used to initialize an embedded ErrorNode field.
|
|
||||||
// It captures the caller's program counter.
|
|
||||||
// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
|
|
||||||
// value of 3 is very common; but, depending on your code nesting, you may need
|
|
||||||
// a different value.
|
|
||||||
func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
|
|
||||||
pc := getPC(callersToSkip)
|
|
||||||
return ErrorNodeNoCause{pc: pc}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewError creates a simple string error (like Error.New). But, this
|
|
||||||
// error also captures the caller's Program Counter and the preceding error (if provided).
|
|
||||||
func NewError(cause error, msg string) error {
|
|
||||||
if cause != nil {
|
|
||||||
return &pcError{
|
|
||||||
ErrorNode: ErrorNode{}.Initialize(cause, 3),
|
|
||||||
msg: msg,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &pcErrorNoCause{
|
|
||||||
ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
|
|
||||||
msg: msg,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// pcError is a simple string error (like error.New) with an ErrorNode (PC & cause).
|
|
||||||
type pcError struct {
|
|
||||||
ErrorNode
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error satisfies the error interface. It shows the error with Program Counter
|
|
||||||
// symbols and calls Error on the preceding error so you can see the full error chain.
|
|
||||||
func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
|
|
||||||
|
|
||||||
// pcErrorNoCause is a simple string error (like error.New) with an ErrorNode (PC).
|
|
||||||
type pcErrorNoCause struct {
|
|
||||||
ErrorNodeNoCause
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error satisfies the error interface. It shows the error with Program Counter symbols.
|
|
||||||
func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
|
|
||||||
82
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
generated
vendored
82
vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
generated
vendored
|
|
@ -1,82 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
import "io"
|
|
||||||
|
|
||||||
// ********** The following is common between the request body AND the response body.
|
|
||||||
|
|
||||||
// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
|
|
||||||
type ProgressReceiver func(bytesTransferred int64)
|
|
||||||
|
|
||||||
// ********** The following are specific to the request body (a ReadSeekCloser)
|
|
||||||
|
|
||||||
// This struct is used when sending a body to the network
|
|
||||||
type requestBodyProgress struct {
|
|
||||||
requestBody io.ReadSeeker // Seeking is required to support retries
|
|
||||||
pr ProgressReceiver
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
|
|
||||||
func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
|
|
||||||
if pr == nil {
|
|
||||||
panic("pr must not be nil")
|
|
||||||
}
|
|
||||||
return &requestBodyProgress{requestBody: requestBody, pr: pr}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads a block of data from an inner stream and reports progress
|
|
||||||
func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = rbp.requestBody.Read(p)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Invokes the user's callback method to report progress
|
|
||||||
position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
rbp.pr(position)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
|
|
||||||
return rbp.requestBody.Seek(offset, whence)
|
|
||||||
}
|
|
||||||
|
|
||||||
// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
|
|
||||||
func (rbp *requestBodyProgress) Close() error {
|
|
||||||
if c, ok := rbp.requestBody.(io.Closer); ok {
|
|
||||||
return c.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ********** The following are specific to the response body (a ReadCloser)
|
|
||||||
|
|
||||||
// This struct is used when sending a body to the network
|
|
||||||
type responseBodyProgress struct {
|
|
||||||
responseBody io.ReadCloser
|
|
||||||
pr ProgressReceiver
|
|
||||||
offset int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
|
|
||||||
func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
|
|
||||||
if pr == nil {
|
|
||||||
panic("pr must not be nil")
|
|
||||||
}
|
|
||||||
return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads a block of data from an inner stream and reports progress
|
|
||||||
func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = rbp.responseBody.Read(p)
|
|
||||||
rbp.offset += int64(n)
|
|
||||||
|
|
||||||
// Invokes the user's callback method to report progress
|
|
||||||
rbp.pr(rbp.offset)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rbp *responseBodyProgress) Close() error {
|
|
||||||
return rbp.responseBody.Close()
|
|
||||||
}
|
|
||||||
147
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
generated
vendored
147
vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
generated
vendored
|
|
@ -1,147 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
|
|
||||||
type Request struct {
|
|
||||||
*http.Request
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequest initializes a new HTTP request object with any desired options.
|
|
||||||
func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
|
|
||||||
// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
|
|
||||||
|
|
||||||
// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
|
|
||||||
request.Request = &http.Request{
|
|
||||||
Method: method,
|
|
||||||
URL: &url,
|
|
||||||
Proto: "HTTP/1.1",
|
|
||||||
ProtoMajor: 1,
|
|
||||||
ProtoMinor: 1,
|
|
||||||
Header: make(http.Header),
|
|
||||||
Host: url.Host,
|
|
||||||
}
|
|
||||||
|
|
||||||
if body != nil {
|
|
||||||
err = request.SetBody(body)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBody sets the body and content length, assumes body is not nil.
|
|
||||||
func (r Request) SetBody(body io.ReadSeeker) error {
|
|
||||||
size, err := body.Seek(0, io.SeekEnd)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
body.Seek(0, io.SeekStart)
|
|
||||||
r.ContentLength = size
|
|
||||||
r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
|
|
||||||
|
|
||||||
if size != 0 {
|
|
||||||
r.Body = &retryableRequestBody{body: body}
|
|
||||||
r.GetBody = func() (io.ReadCloser, error) {
|
|
||||||
_, err := body.Seek(0, io.SeekStart)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return r.Body, nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
|
|
||||||
r.Body = http.NoBody
|
|
||||||
r.GetBody = func() (io.ReadCloser, error) {
|
|
||||||
return http.NoBody, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// close the user-provided empty body
|
|
||||||
if c, ok := body.(io.Closer); ok {
|
|
||||||
c.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
|
|
||||||
// of its Method, URL, Host, Proto(Major/Minor), Header. ContentLength, Close,
|
|
||||||
// RemoteAddr, RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
|
|
||||||
// Cancel, Response, and ctx fields. Copy panics if any of these fields are
|
|
||||||
// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
|
|
||||||
func (r Request) Copy() Request {
|
|
||||||
if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
|
|
||||||
panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
|
|
||||||
"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
|
|
||||||
}
|
|
||||||
copy := *r.Request // Copy the request
|
|
||||||
urlCopy := *(r.Request.URL) // Copy the URL
|
|
||||||
copy.URL = &urlCopy
|
|
||||||
copy.Header = http.Header{} // Copy the header
|
|
||||||
for k, vs := range r.Header {
|
|
||||||
for _, value := range vs {
|
|
||||||
copy.Header.Add(k, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Request{Request: ©} // Return the copy
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r Request) close() error {
|
|
||||||
if r.Body != nil && r.Body != http.NoBody {
|
|
||||||
c, ok := r.Body.(*retryableRequestBody)
|
|
||||||
if !ok {
|
|
||||||
panic("unexpected request body type (should be *retryableReadSeekerCloser)")
|
|
||||||
}
|
|
||||||
return c.realClose()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
|
|
||||||
func (r Request) RewindBody() error {
|
|
||||||
if r.Body != nil && r.Body != http.NoBody {
|
|
||||||
s, ok := r.Body.(io.Seeker)
|
|
||||||
if !ok {
|
|
||||||
panic("unexpected request body type (should be io.Seeker)")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset the stream back to the beginning
|
|
||||||
_, err := s.Seek(0, io.SeekStart)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
|
|
||||||
|
|
||||||
// This struct is used when sending a body to the network
|
|
||||||
type retryableRequestBody struct {
|
|
||||||
body io.ReadSeeker // Seeking is required to support retries
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads a block of data from an inner stream and reports progress
|
|
||||||
func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
|
|
||||||
return b.body.Read(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
|
|
||||||
return b.body.Seek(offset, whence)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *retryableRequestBody) Close() error {
|
|
||||||
// We don't want the underlying transport to close the request body on transient failures so this is a nop.
|
|
||||||
// The pipeline closes the request body upon success.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *retryableRequestBody) realClose() error {
|
|
||||||
if c, ok := b.body.(io.Closer); ok {
|
|
||||||
return c.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
74
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
generated
vendored
74
vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
generated
vendored
|
|
@ -1,74 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
|
|
||||||
// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
|
|
||||||
// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
|
|
||||||
// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
|
|
||||||
// to the expected struct and returns the struct to its caller.
|
|
||||||
type Response interface {
|
|
||||||
Response() *http.Response
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is the default struct that has the http.Response.
|
|
||||||
// A method can replace this struct with its own struct containing an http.Response
|
|
||||||
// field and any other additional fields.
|
|
||||||
type httpResponse struct {
|
|
||||||
response *http.Response
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHTTPResponse is typically called by a Policy object to return a Response object.
|
|
||||||
func NewHTTPResponse(response *http.Response) Response {
|
|
||||||
return &httpResponse{response: response}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This method satisfies the public Response interface's Response method
|
|
||||||
func (r httpResponse) Response() *http.Response {
|
|
||||||
return r.response
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
|
|
||||||
// not nil, then these are also written into the Buffer.
|
|
||||||
func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
|
|
||||||
// Write the request into the buffer.
|
|
||||||
fmt.Fprint(b, " "+request.Method+" "+request.URL.String()+"\n")
|
|
||||||
writeHeader(b, request.Header)
|
|
||||||
if response != nil {
|
|
||||||
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
|
|
||||||
fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n")
|
|
||||||
writeHeader(b, response.Header)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(b, " --------------------------------------------------------------------------------")
|
|
||||||
fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// formatHeaders appends an HTTP request's or response's header into a Buffer.
|
|
||||||
func writeHeader(b *bytes.Buffer, header map[string][]string) {
|
|
||||||
if len(header) == 0 {
|
|
||||||
b.WriteString(" (no headers)\n")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
keys := make([]string, 0, len(header))
|
|
||||||
// Alphabetize the headers
|
|
||||||
for k := range header {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
for _, k := range keys {
|
|
||||||
// Redact the value of any Authorization header to prevent security information from persisting in logs
|
|
||||||
value := interface{}("REDACTED")
|
|
||||||
if !strings.EqualFold(k, "Authorization") {
|
|
||||||
value = header[k]
|
|
||||||
}
|
|
||||||
fmt.Fprintf(b, " %s: %+v\n", k, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
9
vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
generated
vendored
9
vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
generated
vendored
|
|
@ -1,9 +0,0 @@
|
||||||
package pipeline
|
|
||||||
|
|
||||||
const (
|
|
||||||
// UserAgent is the string to be used in the user agent string when making requests.
|
|
||||||
UserAgent = "azure-pipeline-go/" + Version
|
|
||||||
|
|
||||||
// Version is the semantic version (see http://semver.org) of the pipeline package.
|
|
||||||
Version = "0.2.1"
|
|
||||||
)
|
|
||||||
21
vendor/github.com/Azure/azure-storage-blob-go/LICENSE
generated
vendored
21
vendor/github.com/Azure/azure-storage-blob-go/LICENSE
generated
vendored
|
|
@ -1,21 +0,0 @@
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) Microsoft Corporation. All rights reserved.
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE
|
|
||||||
65
vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
generated
vendored
65
vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
generated
vendored
|
|
@ -1,65 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
|
|
||||||
type ModifiedAccessConditions struct {
|
|
||||||
IfModifiedSince time.Time
|
|
||||||
IfUnmodifiedSince time.Time
|
|
||||||
IfMatch ETag
|
|
||||||
IfNoneMatch ETag
|
|
||||||
}
|
|
||||||
|
|
||||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
|
||||||
func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
|
|
||||||
if !ac.IfModifiedSince.IsZero() {
|
|
||||||
ims = &ac.IfModifiedSince
|
|
||||||
}
|
|
||||||
if !ac.IfUnmodifiedSince.IsZero() {
|
|
||||||
ius = &ac.IfUnmodifiedSince
|
|
||||||
}
|
|
||||||
if ac.IfMatch != ETagNone {
|
|
||||||
ime = &ac.IfMatch
|
|
||||||
}
|
|
||||||
if ac.IfNoneMatch != ETagNone {
|
|
||||||
inme = &ac.IfNoneMatch
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
|
|
||||||
type ContainerAccessConditions struct {
|
|
||||||
ModifiedAccessConditions
|
|
||||||
LeaseAccessConditions
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
|
|
||||||
type BlobAccessConditions struct {
|
|
||||||
ModifiedAccessConditions
|
|
||||||
LeaseAccessConditions
|
|
||||||
}
|
|
||||||
|
|
||||||
// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
|
|
||||||
type LeaseAccessConditions struct {
|
|
||||||
LeaseID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
|
||||||
func (ac LeaseAccessConditions) pointers() (leaseID *string) {
|
|
||||||
if ac.LeaseID != "" {
|
|
||||||
leaseID = &ac.LeaseID
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
// getInt32 is for internal infrastructure. It is used with access condition values where
|
|
||||||
// 0 (the default setting) is meaningful. The library interprets 0 as do not send the header
|
|
||||||
// and the privately-storage field in the access condition object is stored as +1 higher than desired.
|
|
||||||
// THis method returns true, if the value is > 0 (explicitly set) and the stored value - 1 (the set desired value).
|
|
||||||
func getInt32(value int32) (bool, int32) {
|
|
||||||
return value > 0, value - 1
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
8009
vendor/github.com/Azure/azure-storage-blob-go/azblob/blob.json
generated
vendored
8009
vendor/github.com/Azure/azure-storage-blob-go/azblob/blob.json
generated
vendored
File diff suppressed because it is too large
Load diff
24
vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go
generated
vendored
24
vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go
generated
vendored
|
|
@ -1,24 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type bytesWriter []byte
|
|
||||||
|
|
||||||
func newBytesWriter(b []byte) bytesWriter {
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) {
|
|
||||||
if off >= int64(len(c)) || off < 0 {
|
|
||||||
return 0, errors.New("Offset value is out of range")
|
|
||||||
}
|
|
||||||
|
|
||||||
n := copy(c[int(off):], b)
|
|
||||||
if n < len(b) {
|
|
||||||
return n, errors.New("Not enough space for all bytes")
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
220
vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go
generated
vendored
220
vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go
generated
vendored
|
|
@ -1,220 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
guuid "github.com/google/uuid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
|
|
||||||
// This allows us to provide a local implementation that fakes the server for hermetic testing.
|
|
||||||
type blockWriter interface {
|
|
||||||
StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte, ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error)
|
|
||||||
CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions, AccessTierType, BlobTagsMap, ClientProvidedKeyOptions) (*BlockBlobCommitBlockListResponse, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
|
|
||||||
// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably
|
|
||||||
// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The
|
|
||||||
// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload
|
|
||||||
// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works
|
|
||||||
// well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. This gives a single dial to tweak and we can
|
|
||||||
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
|
|
||||||
// We can even provide a utility to dial this number in for customer networks to optimize their copies.
|
|
||||||
func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) {
|
|
||||||
if err := o.defaults(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cp := &copier{
|
|
||||||
ctx: ctx,
|
|
||||||
cancel: cancel,
|
|
||||||
reader: from,
|
|
||||||
to: to,
|
|
||||||
id: newID(),
|
|
||||||
o: o,
|
|
||||||
errCh: make(chan error, 1),
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send all our chunks until we get an error.
|
|
||||||
var err error
|
|
||||||
for {
|
|
||||||
if err = cp.sendChunk(); err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If the error is not EOF, then we have a problem.
|
|
||||||
if err != nil && !errors.Is(err, io.EOF) {
|
|
||||||
cp.wg.Wait()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close out our upload.
|
|
||||||
if err := cp.close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cp.result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// copier streams a file via chunks in parallel from a reader representing a file.
|
|
||||||
// Do not use directly, instead use copyFromReader().
|
|
||||||
type copier struct {
|
|
||||||
// ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case,
|
|
||||||
// the copier has the lifetime of a function call, so its fine.
|
|
||||||
ctx context.Context
|
|
||||||
cancel context.CancelFunc
|
|
||||||
|
|
||||||
// o contains our options for uploading.
|
|
||||||
o UploadStreamToBlockBlobOptions
|
|
||||||
|
|
||||||
// id provides the ids for each chunk.
|
|
||||||
id *id
|
|
||||||
|
|
||||||
// reader is the source to be written to storage.
|
|
||||||
reader io.Reader
|
|
||||||
// to is the location we are writing our chunks to.
|
|
||||||
to blockWriter
|
|
||||||
|
|
||||||
// errCh is used to hold the first error from our concurrent writers.
|
|
||||||
errCh chan error
|
|
||||||
// wg provides a count of how many writers we are waiting to finish.
|
|
||||||
wg sync.WaitGroup
|
|
||||||
|
|
||||||
// result holds the final result from blob storage after we have submitted all chunks.
|
|
||||||
result *BlockBlobCommitBlockListResponse
|
|
||||||
}
|
|
||||||
|
|
||||||
type copierChunk struct {
|
|
||||||
buffer []byte
|
|
||||||
id string
|
|
||||||
}
|
|
||||||
|
|
||||||
// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
|
|
||||||
// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier.
|
|
||||||
func (c *copier) getErr() error {
|
|
||||||
select {
|
|
||||||
case err := <-c.errCh:
|
|
||||||
return err
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
return c.ctx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendChunk reads data from out internal reader, creates a chunk, and sends it to be written via a channel.
|
|
||||||
// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
|
|
||||||
func (c *copier) sendChunk() error {
|
|
||||||
if err := c.getErr(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer := c.o.TransferManager.Get()
|
|
||||||
if len(buffer) == 0 {
|
|
||||||
return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager")
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := io.ReadFull(c.reader, buffer)
|
|
||||||
switch {
|
|
||||||
case err == nil && n == 0:
|
|
||||||
return nil
|
|
||||||
case err == nil:
|
|
||||||
id := c.id.next()
|
|
||||||
c.wg.Add(1)
|
|
||||||
c.o.TransferManager.Run(
|
|
||||||
func() {
|
|
||||||
defer c.wg.Done()
|
|
||||||
c.write(copierChunk{buffer: buffer[0:n], id: id})
|
|
||||||
},
|
|
||||||
)
|
|
||||||
return nil
|
|
||||||
case err != nil && (err == io.EOF || err == io.ErrUnexpectedEOF) && n == 0:
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
|
||||||
id := c.id.next()
|
|
||||||
c.wg.Add(1)
|
|
||||||
c.o.TransferManager.Run(
|
|
||||||
func() {
|
|
||||||
defer c.wg.Done()
|
|
||||||
c.write(copierChunk{buffer: buffer[0:n], id: id})
|
|
||||||
},
|
|
||||||
)
|
|
||||||
return io.EOF
|
|
||||||
}
|
|
||||||
if err := c.getErr(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// write uploads a chunk to blob storage.
|
|
||||||
func (c *copier) write(chunk copierChunk) {
|
|
||||||
defer c.o.TransferManager.Put(chunk.buffer)
|
|
||||||
|
|
||||||
if err := c.ctx.Err(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
_, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer), c.o.AccessConditions.LeaseAccessConditions, nil, c.o.ClientProvidedKeyOptions)
|
|
||||||
if err != nil {
|
|
||||||
c.errCh <- fmt.Errorf("write error: %w", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// close commits our blocks to blob storage and closes our writer.
|
|
||||||
func (c *copier) close() error {
|
|
||||||
c.wg.Wait()
|
|
||||||
|
|
||||||
if err := c.getErr(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions, c.o.BlobAccessTier, c.o.BlobTagsMap, c.o.ClientProvidedKeyOptions)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments.
|
|
||||||
type id struct {
|
|
||||||
u [64]byte
|
|
||||||
num uint32
|
|
||||||
all []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// newID constructs a new id.
|
|
||||||
func newID() *id {
|
|
||||||
uu := guuid.New()
|
|
||||||
u := [64]byte{}
|
|
||||||
copy(u[:], uu[:])
|
|
||||||
return &id{u: u}
|
|
||||||
}
|
|
||||||
|
|
||||||
// next returns the next ID.
|
|
||||||
func (id *id) next() string {
|
|
||||||
defer atomic.AddUint32(&id.num, 1)
|
|
||||||
|
|
||||||
binary.BigEndian.PutUint32((id.u[len(guuid.UUID{}):]), atomic.LoadUint32(&id.num))
|
|
||||||
str := base64.StdEncoding.EncodeToString(id.u[:])
|
|
||||||
id.all = append(id.all, str)
|
|
||||||
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return.
|
|
||||||
// The value is only valid until the next time next() is called.
|
|
||||||
func (id *id) issued() []string {
|
|
||||||
return id.all
|
|
||||||
}
|
|
||||||
1
vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go
generated
vendored
1
vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go
generated
vendored
|
|
@ -1 +0,0 @@
|
||||||
package azblob
|
|
||||||
566
vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go
generated
vendored
566
vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go
generated
vendored
|
|
@ -1,566 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"bytes"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"errors"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CommonResponse returns the headers common to all blob REST API responses.
|
|
||||||
type CommonResponse interface {
|
|
||||||
// ETag returns the value for header ETag.
|
|
||||||
ETag() ETag
|
|
||||||
|
|
||||||
// LastModified returns the value for header Last-Modified.
|
|
||||||
LastModified() time.Time
|
|
||||||
|
|
||||||
// RequestID returns the value for header x-ms-request-id.
|
|
||||||
RequestID() string
|
|
||||||
|
|
||||||
// Date returns the value for header Date.
|
|
||||||
Date() time.Time
|
|
||||||
|
|
||||||
// Version returns the value for header x-ms-version.
|
|
||||||
Version() string
|
|
||||||
|
|
||||||
// Response returns the raw HTTP response object.
|
|
||||||
Response() *http.Response
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
|
|
||||||
type UploadToBlockBlobOptions struct {
|
|
||||||
// BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
|
|
||||||
BlockSize int64
|
|
||||||
|
|
||||||
// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL.
|
|
||||||
// Note that the progress reporting is not always increasing; it can go down when retrying a request.
|
|
||||||
Progress pipeline.ProgressReceiver
|
|
||||||
|
|
||||||
// BlobHTTPHeaders indicates the HTTP headers to be associated with the blob.
|
|
||||||
BlobHTTPHeaders BlobHTTPHeaders
|
|
||||||
|
|
||||||
// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
|
|
||||||
Metadata Metadata
|
|
||||||
|
|
||||||
// AccessConditions indicates the access conditions for the block blob.
|
|
||||||
AccessConditions BlobAccessConditions
|
|
||||||
|
|
||||||
// BlobAccessTier indicates the tier of blob
|
|
||||||
BlobAccessTier AccessTierType
|
|
||||||
|
|
||||||
// BlobTagsMap
|
|
||||||
BlobTagsMap BlobTagsMap
|
|
||||||
|
|
||||||
// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
|
|
||||||
ClientProvidedKeyOptions ClientProvidedKeyOptions
|
|
||||||
|
|
||||||
// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
|
|
||||||
Parallelism uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob.
|
|
||||||
func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64,
|
|
||||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
|
||||||
if o.BlockSize == 0 {
|
|
||||||
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
|
|
||||||
if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
|
|
||||||
return nil, errors.New("buffer is too large to upload to a block blob")
|
|
||||||
}
|
|
||||||
// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
|
|
||||||
if readerSize <= BlockBlobMaxUploadBlobBytes {
|
|
||||||
o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
|
|
||||||
} else {
|
|
||||||
o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
|
|
||||||
if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
|
|
||||||
o.BlockSize = BlobDefaultDownloadBlockSize
|
|
||||||
}
|
|
||||||
// StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize).
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if readerSize <= BlockBlobMaxUploadBlobBytes {
|
|
||||||
// If the size can fit in 1 Upload call, do it this way
|
|
||||||
var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
|
|
||||||
if o.Progress != nil {
|
|
||||||
body = pipeline.NewRequestBodyProgress(body, o.Progress)
|
|
||||||
}
|
|
||||||
return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions)
|
|
||||||
}
|
|
||||||
|
|
||||||
var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
|
|
||||||
|
|
||||||
blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
|
|
||||||
progress := int64(0)
|
|
||||||
progressLock := &sync.Mutex{}
|
|
||||||
|
|
||||||
err := DoBatchTransfer(ctx, BatchTransferOptions{
|
|
||||||
OperationName: "uploadReaderAtToBlockBlob",
|
|
||||||
TransferSize: readerSize,
|
|
||||||
ChunkSize: o.BlockSize,
|
|
||||||
Parallelism: o.Parallelism,
|
|
||||||
Operation: func(offset int64, count int64, ctx context.Context) error {
|
|
||||||
// This function is called once per block.
|
|
||||||
// It is passed this block's offset within the buffer and its count of bytes
|
|
||||||
// Prepare to read the proper block/section of the buffer
|
|
||||||
var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
|
|
||||||
blockNum := offset / o.BlockSize
|
|
||||||
if o.Progress != nil {
|
|
||||||
blockProgress := int64(0)
|
|
||||||
body = pipeline.NewRequestBodyProgress(body,
|
|
||||||
func(bytesTransferred int64) {
|
|
||||||
diff := bytesTransferred - blockProgress
|
|
||||||
blockProgress = bytesTransferred
|
|
||||||
progressLock.Lock() // 1 goroutine at a time gets a progress report
|
|
||||||
progress += diff
|
|
||||||
o.Progress(progress)
|
|
||||||
progressLock.Unlock()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
|
|
||||||
// at the same time causing PutBlockList to get a mix of blocks from all the clients.
|
|
||||||
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
|
|
||||||
_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions)
|
|
||||||
return err
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// All put blocks were successful, call Put Block List to finalize the blob
|
|
||||||
return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
|
|
||||||
func UploadBufferToBlockBlob(ctx context.Context, b []byte,
|
|
||||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
|
||||||
return uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), blockBlobURL, o)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadFileToBlockBlob uploads a file in blocks to a block blob.
|
|
||||||
func UploadFileToBlockBlob(ctx context.Context, file *os.File,
|
|
||||||
blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
|
|
||||||
|
|
||||||
stat, err := file.Stat()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return uploadReaderAtToBlockBlob(ctx, file, stat.Size(), blockBlobURL, o)
|
|
||||||
}
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
|
|
||||||
|
|
||||||
// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
|
|
||||||
type DownloadFromBlobOptions struct {
|
|
||||||
// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
|
|
||||||
BlockSize int64
|
|
||||||
|
|
||||||
// Progress is a function that is invoked periodically as bytes are received.
|
|
||||||
Progress pipeline.ProgressReceiver
|
|
||||||
|
|
||||||
// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
|
|
||||||
AccessConditions BlobAccessConditions
|
|
||||||
|
|
||||||
// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
|
|
||||||
ClientProvidedKeyOptions ClientProvidedKeyOptions
|
|
||||||
|
|
||||||
// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
|
|
||||||
Parallelism uint16
|
|
||||||
|
|
||||||
// RetryReaderOptionsPerBlock is used when downloading each block.
|
|
||||||
RetryReaderOptionsPerBlock RetryReaderOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
// downloadBlobToWriterAt downloads an Azure blob to a buffer with parallel.
|
|
||||||
func downloadBlobToWriterAt(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
|
||||||
writer io.WriterAt, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
|
|
||||||
if o.BlockSize == 0 {
|
|
||||||
o.BlockSize = BlobDefaultDownloadBlockSize
|
|
||||||
}
|
|
||||||
|
|
||||||
if count == CountToEnd { // If size not specified, calculate it
|
|
||||||
if initialDownloadResponse != nil {
|
|
||||||
count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
|
|
||||||
} else {
|
|
||||||
// If we don't have the length at all, get it
|
|
||||||
dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false, o.ClientProvidedKeyOptions)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
count = dr.ContentLength() - offset
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if count <= 0 {
|
|
||||||
// The file is empty, there is nothing to download.
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare and do parallel download.
|
|
||||||
progress := int64(0)
|
|
||||||
progressLock := &sync.Mutex{}
|
|
||||||
|
|
||||||
err := DoBatchTransfer(ctx, BatchTransferOptions{
|
|
||||||
OperationName: "downloadBlobToWriterAt",
|
|
||||||
TransferSize: count,
|
|
||||||
ChunkSize: o.BlockSize,
|
|
||||||
Parallelism: o.Parallelism,
|
|
||||||
Operation: func(chunkStart int64, count int64, ctx context.Context) error {
|
|
||||||
dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false, o.ClientProvidedKeyOptions)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
body := dr.Body(o.RetryReaderOptionsPerBlock)
|
|
||||||
if o.Progress != nil {
|
|
||||||
rangeProgress := int64(0)
|
|
||||||
body = pipeline.NewResponseBodyProgress(
|
|
||||||
body,
|
|
||||||
func(bytesTransferred int64) {
|
|
||||||
diff := bytesTransferred - rangeProgress
|
|
||||||
rangeProgress = bytesTransferred
|
|
||||||
progressLock.Lock()
|
|
||||||
progress += diff
|
|
||||||
o.Progress(progress)
|
|
||||||
progressLock.Unlock()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
_, err = io.Copy(newSectionWriter(writer, chunkStart, count), body)
|
|
||||||
body.Close()
|
|
||||||
return err
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
|
|
||||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
|
||||||
func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
|
||||||
b []byte, o DownloadFromBlobOptions) error {
|
|
||||||
return downloadBlobToWriterAt(ctx, blobURL, offset, count, newBytesWriter(b), o, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DownloadBlobToFile downloads an Azure blob to a local file.
|
|
||||||
// The file would be truncated if the size doesn't match.
|
|
||||||
// Offset and count are optional, pass 0 for both to download the entire blob.
|
|
||||||
func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
|
|
||||||
file *os.File, o DownloadFromBlobOptions) error {
|
|
||||||
// 1. Calculate the size of the destination file
|
|
||||||
var size int64
|
|
||||||
|
|
||||||
if count == CountToEnd {
|
|
||||||
// Try to get Azure blob's size
|
|
||||||
props, err := blobURL.GetProperties(ctx, o.AccessConditions, o.ClientProvidedKeyOptions)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
size = props.ContentLength() - offset
|
|
||||||
} else {
|
|
||||||
size = count
|
|
||||||
}
|
|
||||||
|
|
||||||
// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
|
|
||||||
stat, err := file.Stat()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if stat.Size() != size {
|
|
||||||
if err = file.Truncate(size); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if size > 0 {
|
|
||||||
return downloadBlobToWriterAt(ctx, blobURL, offset, size, file, o, nil)
|
|
||||||
} else { // if the blob's size is 0, there is no need in downloading it
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// BatchTransferOptions identifies options used by DoBatchTransfer.
|
|
||||||
type BatchTransferOptions struct {
|
|
||||||
TransferSize int64
|
|
||||||
ChunkSize int64
|
|
||||||
Parallelism uint16
|
|
||||||
Operation func(offset int64, chunkSize int64, ctx context.Context) error
|
|
||||||
OperationName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoBatchTransfer helps to execute operations in a batch manner.
|
|
||||||
// Can be used by users to customize batch works (for other scenarios that the SDK does not provide)
|
|
||||||
func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
|
|
||||||
if o.ChunkSize == 0 {
|
|
||||||
return errors.New("ChunkSize cannot be 0")
|
|
||||||
}
|
|
||||||
|
|
||||||
if o.Parallelism == 0 {
|
|
||||||
o.Parallelism = 5 // default Parallelism
|
|
||||||
}
|
|
||||||
|
|
||||||
// Prepare and do parallel operations.
|
|
||||||
numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
|
|
||||||
operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently
|
|
||||||
operationResponseChannel := make(chan error, numChunks) // Holds each response
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
// Create the goroutines that process each operation (in parallel).
|
|
||||||
for g := uint16(0); g < o.Parallelism; g++ {
|
|
||||||
//grIndex := g
|
|
||||||
go func() {
|
|
||||||
for f := range operationChannel {
|
|
||||||
err := f()
|
|
||||||
operationResponseChannel <- err
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add each chunk's operation to the channel.
|
|
||||||
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
|
|
||||||
curChunkSize := o.ChunkSize
|
|
||||||
|
|
||||||
if chunkNum == numChunks-1 { // Last chunk
|
|
||||||
curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
|
|
||||||
}
|
|
||||||
offset := int64(chunkNum) * o.ChunkSize
|
|
||||||
|
|
||||||
operationChannel <- func() error {
|
|
||||||
return o.Operation(offset, curChunkSize, ctx)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
close(operationChannel)
|
|
||||||
|
|
||||||
// Wait for the operations to complete.
|
|
||||||
var firstErr error = nil
|
|
||||||
for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
|
|
||||||
responseError := <-operationResponseChannel
|
|
||||||
// record the first error (the original error which should cause the other chunks to fail with canceled context)
|
|
||||||
if responseError != nil && firstErr == nil {
|
|
||||||
cancel() // As soon as any operation fails, cancel all remaining operation calls
|
|
||||||
firstErr = responseError
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return firstErr
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// TransferManager provides a buffer and thread pool manager for certain transfer options.
|
|
||||||
// It is undefined behavior if code outside of this package call any of these methods.
|
|
||||||
type TransferManager interface {
|
|
||||||
// Get provides a buffer that will be used to read data into and write out to the stream.
|
|
||||||
// It is guaranteed by this package to not read or write beyond the size of the slice.
|
|
||||||
Get() []byte
|
|
||||||
// Put may or may not put the buffer into underlying storage, depending on settings.
|
|
||||||
// The buffer must not be touched after this has been called.
|
|
||||||
Put(b []byte)
|
|
||||||
// Run will use a goroutine pool entry to run a function. This blocks until a pool
|
|
||||||
// goroutine becomes available.
|
|
||||||
Run(func())
|
|
||||||
// Closes shuts down all internal goroutines. This must be called when the TransferManager
|
|
||||||
// will no longer be used. Not closing it will cause a goroutine leak.
|
|
||||||
Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
type staticBuffer struct {
|
|
||||||
buffers chan []byte
|
|
||||||
size int
|
|
||||||
threadpool chan func()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
|
|
||||||
// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
|
|
||||||
// can be shared between calls if you wish to control maximum memory and concurrency with
|
|
||||||
// multiple concurrent calls.
|
|
||||||
func NewStaticBuffer(size, max int) (TransferManager, error) {
|
|
||||||
if size < 1 || max < 1 {
|
|
||||||
return nil, fmt.Errorf("cannot be called with size or max set to < 1")
|
|
||||||
}
|
|
||||||
|
|
||||||
if size < _1MiB {
|
|
||||||
return nil, fmt.Errorf("cannot have size < 1MiB")
|
|
||||||
}
|
|
||||||
|
|
||||||
threadpool := make(chan func(), max)
|
|
||||||
buffers := make(chan []byte, max)
|
|
||||||
for i := 0; i < max; i++ {
|
|
||||||
go func() {
|
|
||||||
for f := range threadpool {
|
|
||||||
f()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
buffers <- make([]byte, size)
|
|
||||||
}
|
|
||||||
return staticBuffer{
|
|
||||||
buffers: buffers,
|
|
||||||
size: size,
|
|
||||||
threadpool: threadpool,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get implements TransferManager.Get().
|
|
||||||
func (s staticBuffer) Get() []byte {
|
|
||||||
return <-s.buffers
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put implements TransferManager.Put().
|
|
||||||
func (s staticBuffer) Put(b []byte) {
|
|
||||||
select {
|
|
||||||
case s.buffers <- b:
|
|
||||||
default: // This shouldn't happen, but just in case they call Put() with there own buffer.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run implements TransferManager.Run().
|
|
||||||
func (s staticBuffer) Run(f func()) {
|
|
||||||
s.threadpool <- f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close implements TransferManager.Close().
|
|
||||||
func (s staticBuffer) Close() {
|
|
||||||
close(s.threadpool)
|
|
||||||
close(s.buffers)
|
|
||||||
}
|
|
||||||
|
|
||||||
type syncPool struct {
|
|
||||||
threadpool chan func()
|
|
||||||
pool sync.Pool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSyncPool creates a TransferManager that will use a sync.Pool
|
|
||||||
// that can hold a non-capped number of buffers constrained by concurrency. This
|
|
||||||
// can be shared between calls if you wish to share memory and concurrency.
|
|
||||||
func NewSyncPool(size, concurrency int) (TransferManager, error) {
|
|
||||||
if size < 1 || concurrency < 1 {
|
|
||||||
return nil, fmt.Errorf("cannot be called with size or max set to < 1")
|
|
||||||
}
|
|
||||||
|
|
||||||
if size < _1MiB {
|
|
||||||
return nil, fmt.Errorf("cannot have size < 1MiB")
|
|
||||||
}
|
|
||||||
|
|
||||||
threadpool := make(chan func(), concurrency)
|
|
||||||
for i := 0; i < concurrency; i++ {
|
|
||||||
go func() {
|
|
||||||
for f := range threadpool {
|
|
||||||
f()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
return &syncPool{
|
|
||||||
threadpool: threadpool,
|
|
||||||
pool: sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return make([]byte, size)
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get implements TransferManager.Get().
|
|
||||||
func (s *syncPool) Get() []byte {
|
|
||||||
return s.pool.Get().([]byte)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Put implements TransferManager.Put().
|
|
||||||
func (s *syncPool) Put(b []byte) {
|
|
||||||
s.pool.Put(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run implements TransferManager.Run().
|
|
||||||
func (s *syncPool) Run(f func()) {
|
|
||||||
s.threadpool <- f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close implements TransferManager.Close().
|
|
||||||
func (s *syncPool) Close() {
|
|
||||||
close(s.threadpool)
|
|
||||||
}
|
|
||||||
|
|
||||||
const _1MiB = 1024 * 1024
|
|
||||||
|
|
||||||
// UploadStreamToBlockBlobOptions is options for UploadStreamToBlockBlob.
|
|
||||||
type UploadStreamToBlockBlobOptions struct {
|
|
||||||
// TransferManager provides a TransferManager that controls buffer allocation/reuse and
|
|
||||||
// concurrency. This overrides BufferSize and MaxBuffers if set.
|
|
||||||
TransferManager TransferManager
|
|
||||||
transferMangerNotSet bool
|
|
||||||
// BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
|
|
||||||
BufferSize int
|
|
||||||
// MaxBuffers defines the number of simultaneous uploads will be performed to upload the file.
|
|
||||||
MaxBuffers int
|
|
||||||
BlobHTTPHeaders BlobHTTPHeaders
|
|
||||||
Metadata Metadata
|
|
||||||
AccessConditions BlobAccessConditions
|
|
||||||
BlobAccessTier AccessTierType
|
|
||||||
BlobTagsMap BlobTagsMap
|
|
||||||
ClientProvidedKeyOptions ClientProvidedKeyOptions
|
|
||||||
}
|
|
||||||
|
|
||||||
func (u *UploadStreamToBlockBlobOptions) defaults() error {
|
|
||||||
if u.TransferManager != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if u.MaxBuffers == 0 {
|
|
||||||
u.MaxBuffers = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if u.BufferSize < _1MiB {
|
|
||||||
u.BufferSize = _1MiB
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
|
|
||||||
}
|
|
||||||
u.transferMangerNotSet = true
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL.
|
|
||||||
// A Context deadline or cancellation will cause this to error.
|
|
||||||
func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
|
|
||||||
if err := o.defaults(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If we used the default manager, we need to close it.
|
|
||||||
if o.transferMangerNotSet {
|
|
||||||
defer o.TransferManager.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
result, err := copyFromReader(ctx, reader, blockBlobURL, o)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version.
|
|
||||||
// TODO: Remove on next minor release in v0 or before v1.
|
|
||||||
type UploadStreamOptions struct {
|
|
||||||
BufferSize int
|
|
||||||
MaxBuffers int
|
|
||||||
}
|
|
||||||
172
vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
generated
vendored
172
vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
generated
vendored
|
|
@ -1,172 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
snapshot = "snapshot"
|
|
||||||
versionId = "versionid"
|
|
||||||
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
|
|
||||||
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
|
|
||||||
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
|
|
||||||
type BlobURLParts struct {
|
|
||||||
Scheme string // Ex: "https://"
|
|
||||||
Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
|
|
||||||
IPEndpointStyleInfo IPEndpointStyleInfo
|
|
||||||
ContainerName string // "" if no container
|
|
||||||
BlobName string // "" if no blob
|
|
||||||
Snapshot string // "" if not a snapshot
|
|
||||||
SAS SASQueryParameters
|
|
||||||
UnparsedParams string
|
|
||||||
VersionID string // "" if not versioning enabled
|
|
||||||
}
|
|
||||||
|
|
||||||
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
|
|
||||||
// Ex: "https://10.132.141.33/accountname/containername"
|
|
||||||
type IPEndpointStyleInfo struct {
|
|
||||||
AccountName string // "" if not using IP endpoint style
|
|
||||||
}
|
|
||||||
|
|
||||||
// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as:
|
|
||||||
// http(s)://IP(:port)/storageaccount/container/...
|
|
||||||
// As url's Host property, host could be both host or host:port
|
|
||||||
func isIPEndpointStyle(host string) bool {
|
|
||||||
if host == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if h, _, err := net.SplitHostPort(host); err == nil {
|
|
||||||
host = h
|
|
||||||
}
|
|
||||||
// For IPv6, there could be case where SplitHostPort fails for cannot finding port.
|
|
||||||
// In this case, eliminate the '[' and ']' in the URL.
|
|
||||||
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
|
|
||||||
if host[0] == '[' && host[len(host)-1] == ']' {
|
|
||||||
host = host[1 : len(host)-1]
|
|
||||||
}
|
|
||||||
return net.ParseIP(host) != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
|
|
||||||
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
|
|
||||||
func NewBlobURLParts(u url.URL) BlobURLParts {
|
|
||||||
up := BlobURLParts{
|
|
||||||
Scheme: u.Scheme,
|
|
||||||
Host: u.Host,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the container & blob names (if any)
|
|
||||||
if u.Path != "" {
|
|
||||||
path := u.Path
|
|
||||||
if path[0] == '/' {
|
|
||||||
path = path[1:] // If path starts with a slash, remove it
|
|
||||||
}
|
|
||||||
if isIPEndpointStyle(up.Host) {
|
|
||||||
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
|
|
||||||
up.IPEndpointStyleInfo.AccountName = path
|
|
||||||
} else {
|
|
||||||
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
|
|
||||||
path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists)
|
|
||||||
if containerEndIndex == -1 { // Slash not found; path has container name & no blob name
|
|
||||||
up.ContainerName = path
|
|
||||||
} else {
|
|
||||||
up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes
|
|
||||||
up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Convert the query parameters to a case-sensitive map & trim whitespace
|
|
||||||
paramsMap := u.Query()
|
|
||||||
|
|
||||||
up.Snapshot = "" // Assume no snapshot
|
|
||||||
up.VersionID = "" // Assume no versionID
|
|
||||||
if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
|
|
||||||
up.Snapshot = snapshotStr[0]
|
|
||||||
// If we recognized the query parameter, remove it from the map
|
|
||||||
delete(paramsMap, snapshot)
|
|
||||||
}
|
|
||||||
|
|
||||||
if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok {
|
|
||||||
up.VersionID = versionIDs[0]
|
|
||||||
// If we recognized the query parameter, remove it from the map
|
|
||||||
delete(paramsMap, versionId) // delete "versionid" from paramsMap
|
|
||||||
delete(paramsMap, "versionId") // delete "versionId" from paramsMap
|
|
||||||
}
|
|
||||||
up.SAS = newSASQueryParameters(paramsMap, true)
|
|
||||||
up.UnparsedParams = paramsMap.Encode()
|
|
||||||
return up
|
|
||||||
}
|
|
||||||
|
|
||||||
type caseInsensitiveValues url.Values // map[string][]string
|
|
||||||
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
|
|
||||||
key = strings.ToLower(key)
|
|
||||||
for k, v := range values {
|
|
||||||
if strings.ToLower(k) == key {
|
|
||||||
return v, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return []string{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
|
|
||||||
// field contains the SAS, snapshot, and unparsed query parameters.
|
|
||||||
func (up BlobURLParts) URL() url.URL {
|
|
||||||
path := ""
|
|
||||||
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
|
|
||||||
path += "/" + up.IPEndpointStyleInfo.AccountName
|
|
||||||
}
|
|
||||||
// Concatenate container & blob names (if they exist)
|
|
||||||
if up.ContainerName != "" {
|
|
||||||
path += "/" + up.ContainerName
|
|
||||||
if up.BlobName != "" {
|
|
||||||
path += "/" + up.BlobName
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rawQuery := up.UnparsedParams
|
|
||||||
|
|
||||||
//If no snapshot is initially provided, fill it in from the SAS query properties to help the user
|
|
||||||
if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
|
|
||||||
up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Concatenate blob snapshot query parameter (if it exists)
|
|
||||||
if up.Snapshot != "" {
|
|
||||||
if len(rawQuery) > 0 {
|
|
||||||
rawQuery += "&"
|
|
||||||
}
|
|
||||||
rawQuery += snapshot + "=" + up.Snapshot
|
|
||||||
}
|
|
||||||
|
|
||||||
// Concatenate blob version id query parameter (if it exists)
|
|
||||||
if up.VersionID != "" {
|
|
||||||
if len(rawQuery) > 0 {
|
|
||||||
rawQuery += "&"
|
|
||||||
}
|
|
||||||
rawQuery += versionId + "=" + up.VersionID
|
|
||||||
}
|
|
||||||
|
|
||||||
sas := up.SAS.Encode()
|
|
||||||
if sas != "" {
|
|
||||||
if len(rawQuery) > 0 {
|
|
||||||
rawQuery += "&"
|
|
||||||
}
|
|
||||||
rawQuery += sas
|
|
||||||
}
|
|
||||||
u := url.URL{
|
|
||||||
Scheme: up.Scheme,
|
|
||||||
Host: up.Host,
|
|
||||||
Path: path,
|
|
||||||
RawQuery: rawQuery,
|
|
||||||
}
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
33
vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
generated
vendored
33
vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
generated
vendored
|
|
@ -1,33 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
// ClientProvidedKeyOptions contains headers which may be be specified from service version 2019-02-02
|
|
||||||
// or higher to encrypts the data on the service-side with the given key. Use of customer-provided keys
|
|
||||||
// must be done over HTTPS. As the encryption key itself is provided in the request, a secure connection
|
|
||||||
// must be established to transfer the key.
|
|
||||||
// Note: Azure Storage does not store or manage customer provided encryption keys. Keys are securely discarded
|
|
||||||
// as soon as possible after they’ve been used to encrypt or decrypt the blob data.
|
|
||||||
// https://docs.microsoft.com/en-us/azure/storage/common/storage-service-encryption
|
|
||||||
// https://docs.microsoft.com/en-us/azure/storage/common/customer-managed-keys-overview
|
|
||||||
type ClientProvidedKeyOptions struct {
|
|
||||||
// A Base64-encoded AES-256 encryption key value.
|
|
||||||
EncryptionKey *string
|
|
||||||
|
|
||||||
// The Base64-encoded SHA256 of the encryption key.
|
|
||||||
EncryptionKeySha256 *string
|
|
||||||
|
|
||||||
// Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
|
|
||||||
EncryptionAlgorithm EncryptionAlgorithmType
|
|
||||||
|
|
||||||
// Specifies the name of the encryption scope to use to encrypt the data provided in the request
|
|
||||||
// https://docs.microsoft.com/en-us/azure/storage/blobs/encryption-scope-overview
|
|
||||||
// https://docs.microsoft.com/en-us/azure/key-vault/general/overview
|
|
||||||
EncryptionScope *string
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClientProvidedKeyOptions function.
|
|
||||||
// By default the value of encryption algorithm params is "AES256" for service version 2019-02-02 or higher.
|
|
||||||
func NewClientProvidedKeyOptions(ek *string, eksha256 *string, es *string) (cpk ClientProvidedKeyOptions) {
|
|
||||||
cpk = ClientProvidedKeyOptions{}
|
|
||||||
cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, cpk.EncryptionScope = ek, eksha256, EncryptionAlgorithmAES256, es
|
|
||||||
return cpk
|
|
||||||
}
|
|
||||||
284
vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
generated
vendored
284
vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
generated
vendored
|
|
@ -1,284 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
|
|
||||||
type BlobSASSignatureValues struct {
|
|
||||||
Version string `param:"sv"` // If not specified, this defaults to SASVersion
|
|
||||||
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants
|
|
||||||
StartTime time.Time `param:"st"` // Not specified if IsZero
|
|
||||||
ExpiryTime time.Time `param:"se"` // Not specified if IsZero
|
|
||||||
SnapshotTime time.Time
|
|
||||||
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
|
|
||||||
IPRange IPRange `param:"sip"`
|
|
||||||
Identifier string `param:"si"`
|
|
||||||
ContainerName string
|
|
||||||
BlobName string // Use "" to create a Container SAS
|
|
||||||
CacheControl string // rscc
|
|
||||||
ContentDisposition string // rscd
|
|
||||||
ContentEncoding string // rsce
|
|
||||||
ContentLanguage string // rscl
|
|
||||||
ContentType string // rsct
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce
|
|
||||||
// the proper SAS query parameters.
|
|
||||||
// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential
|
|
||||||
func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) {
|
|
||||||
resource := "c"
|
|
||||||
if credential == nil {
|
|
||||||
return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential")
|
|
||||||
}
|
|
||||||
|
|
||||||
if !v.SnapshotTime.IsZero() {
|
|
||||||
resource = "bs"
|
|
||||||
//Make sure the permission characters are in the correct order
|
|
||||||
perms := &BlobSASPermissions{}
|
|
||||||
if err := perms.Parse(v.Permissions); err != nil {
|
|
||||||
return SASQueryParameters{}, err
|
|
||||||
}
|
|
||||||
v.Permissions = perms.String()
|
|
||||||
} else if v.Version != "" {
|
|
||||||
resource = "bv"
|
|
||||||
//Make sure the permission characters are in the correct order
|
|
||||||
perms := &BlobSASPermissions{}
|
|
||||||
if err := perms.Parse(v.Permissions); err != nil {
|
|
||||||
return SASQueryParameters{}, err
|
|
||||||
}
|
|
||||||
v.Permissions = perms.String()
|
|
||||||
} else if v.BlobName == "" {
|
|
||||||
// Make sure the permission characters are in the correct order
|
|
||||||
perms := &ContainerSASPermissions{}
|
|
||||||
if err := perms.Parse(v.Permissions); err != nil {
|
|
||||||
return SASQueryParameters{}, err
|
|
||||||
}
|
|
||||||
v.Permissions = perms.String()
|
|
||||||
} else {
|
|
||||||
resource = "b"
|
|
||||||
// Make sure the permission characters are in the correct order
|
|
||||||
perms := &BlobSASPermissions{}
|
|
||||||
if err := perms.Parse(v.Permissions); err != nil {
|
|
||||||
return SASQueryParameters{}, err
|
|
||||||
}
|
|
||||||
v.Permissions = perms.String()
|
|
||||||
}
|
|
||||||
if v.Version == "" {
|
|
||||||
v.Version = SASVersion
|
|
||||||
}
|
|
||||||
startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
|
|
||||||
|
|
||||||
signedIdentifier := v.Identifier
|
|
||||||
|
|
||||||
udk := credential.getUDKParams()
|
|
||||||
|
|
||||||
if udk != nil {
|
|
||||||
udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
|
|
||||||
//I don't like this answer to combining the functions
|
|
||||||
//But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
|
|
||||||
signedIdentifier = strings.Join([]string{
|
|
||||||
udk.SignedOid,
|
|
||||||
udk.SignedTid,
|
|
||||||
udkStart,
|
|
||||||
udkExpiry,
|
|
||||||
udk.SignedService,
|
|
||||||
udk.SignedVersion,
|
|
||||||
}, "\n")
|
|
||||||
}
|
|
||||||
|
|
||||||
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
|
|
||||||
stringToSign := strings.Join([]string{
|
|
||||||
v.Permissions,
|
|
||||||
startTime,
|
|
||||||
expiryTime,
|
|
||||||
getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName),
|
|
||||||
signedIdentifier,
|
|
||||||
v.IPRange.String(),
|
|
||||||
string(v.Protocol),
|
|
||||||
v.Version,
|
|
||||||
resource,
|
|
||||||
snapshotTime, // signed timestamp
|
|
||||||
v.CacheControl, // rscc
|
|
||||||
v.ContentDisposition, // rscd
|
|
||||||
v.ContentEncoding, // rsce
|
|
||||||
v.ContentLanguage, // rscl
|
|
||||||
v.ContentType}, // rsct
|
|
||||||
"\n")
|
|
||||||
|
|
||||||
signature := ""
|
|
||||||
signature = credential.ComputeHMACSHA256(stringToSign)
|
|
||||||
|
|
||||||
p := SASQueryParameters{
|
|
||||||
// Common SAS parameters
|
|
||||||
version: v.Version,
|
|
||||||
protocol: v.Protocol,
|
|
||||||
startTime: v.StartTime,
|
|
||||||
expiryTime: v.ExpiryTime,
|
|
||||||
permissions: v.Permissions,
|
|
||||||
ipRange: v.IPRange,
|
|
||||||
|
|
||||||
// Container/Blob-specific SAS parameters
|
|
||||||
resource: resource,
|
|
||||||
identifier: v.Identifier,
|
|
||||||
cacheControl: v.CacheControl,
|
|
||||||
contentDisposition: v.ContentDisposition,
|
|
||||||
contentEncoding: v.ContentEncoding,
|
|
||||||
contentLanguage: v.ContentLanguage,
|
|
||||||
contentType: v.ContentType,
|
|
||||||
snapshotTime: v.SnapshotTime,
|
|
||||||
|
|
||||||
// Calculated SAS signature
|
|
||||||
signature: signature,
|
|
||||||
}
|
|
||||||
|
|
||||||
//User delegation SAS specific parameters
|
|
||||||
if udk != nil {
|
|
||||||
p.signedOid = udk.SignedOid
|
|
||||||
p.signedTid = udk.SignedTid
|
|
||||||
p.signedStart = udk.SignedStart
|
|
||||||
p.signedExpiry = udk.SignedExpiry
|
|
||||||
p.signedService = udk.SignedService
|
|
||||||
p.signedVersion = udk.SignedVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
return p, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
|
|
||||||
func getCanonicalName(account string, containerName string, blobName string) string {
|
|
||||||
// Container: "/blob/account/containername"
|
|
||||||
// Blob: "/blob/account/containername/blobname"
|
|
||||||
elements := []string{"/blob/", account, "/", containerName}
|
|
||||||
if blobName != "" {
|
|
||||||
elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
|
|
||||||
}
|
|
||||||
return strings.Join(elements, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
|
|
||||||
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
|
|
||||||
type ContainerSASPermissions struct {
|
|
||||||
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// String produces the SAS permissions string for an Azure Storage container.
|
|
||||||
// Call this method to set BlobSASSignatureValues's Permissions field.
|
|
||||||
func (p ContainerSASPermissions) String() string {
|
|
||||||
var b bytes.Buffer
|
|
||||||
if p.Read {
|
|
||||||
b.WriteRune('r')
|
|
||||||
}
|
|
||||||
if p.Add {
|
|
||||||
b.WriteRune('a')
|
|
||||||
}
|
|
||||||
if p.Create {
|
|
||||||
b.WriteRune('c')
|
|
||||||
}
|
|
||||||
if p.Write {
|
|
||||||
b.WriteRune('w')
|
|
||||||
}
|
|
||||||
if p.Delete {
|
|
||||||
b.WriteRune('d')
|
|
||||||
}
|
|
||||||
if p.DeletePreviousVersion {
|
|
||||||
b.WriteRune('x')
|
|
||||||
}
|
|
||||||
if p.List {
|
|
||||||
b.WriteRune('l')
|
|
||||||
}
|
|
||||||
if p.Tag {
|
|
||||||
b.WriteRune('t')
|
|
||||||
}
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse initializes the ContainerSASPermissions's fields from a string.
|
|
||||||
func (p *ContainerSASPermissions) Parse(s string) error {
|
|
||||||
*p = ContainerSASPermissions{} // Clear the flags
|
|
||||||
for _, r := range s {
|
|
||||||
switch r {
|
|
||||||
case 'r':
|
|
||||||
p.Read = true
|
|
||||||
case 'a':
|
|
||||||
p.Add = true
|
|
||||||
case 'c':
|
|
||||||
p.Create = true
|
|
||||||
case 'w':
|
|
||||||
p.Write = true
|
|
||||||
case 'd':
|
|
||||||
p.Delete = true
|
|
||||||
case 'x':
|
|
||||||
p.DeletePreviousVersion = true
|
|
||||||
case 'l':
|
|
||||||
p.List = true
|
|
||||||
case 't':
|
|
||||||
p.Tag = true
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid permission: '%v'", r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
|
|
||||||
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
|
|
||||||
type BlobSASPermissions struct{ Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag bool }
|
|
||||||
|
|
||||||
// String produces the SAS permissions string for an Azure Storage blob.
|
|
||||||
// Call this method to set BlobSASSignatureValues's Permissions field.
|
|
||||||
func (p BlobSASPermissions) String() string {
|
|
||||||
var b bytes.Buffer
|
|
||||||
if p.Read {
|
|
||||||
b.WriteRune('r')
|
|
||||||
}
|
|
||||||
if p.Add {
|
|
||||||
b.WriteRune('a')
|
|
||||||
}
|
|
||||||
if p.Create {
|
|
||||||
b.WriteRune('c')
|
|
||||||
}
|
|
||||||
if p.Write {
|
|
||||||
b.WriteRune('w')
|
|
||||||
}
|
|
||||||
if p.Delete {
|
|
||||||
b.WriteRune('d')
|
|
||||||
}
|
|
||||||
if p.DeletePreviousVersion {
|
|
||||||
b.WriteRune('x')
|
|
||||||
}
|
|
||||||
if p.Tag {
|
|
||||||
b.WriteRune('t')
|
|
||||||
}
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse initializes the BlobSASPermissions's fields from a string.
|
|
||||||
func (p *BlobSASPermissions) Parse(s string) error {
|
|
||||||
*p = BlobSASPermissions{} // Clear the flags
|
|
||||||
for _, r := range s {
|
|
||||||
switch r {
|
|
||||||
case 'r':
|
|
||||||
p.Read = true
|
|
||||||
case 'a':
|
|
||||||
p.Add = true
|
|
||||||
case 'c':
|
|
||||||
p.Create = true
|
|
||||||
case 'w':
|
|
||||||
p.Write = true
|
|
||||||
case 'd':
|
|
||||||
p.Delete = true
|
|
||||||
case 'x':
|
|
||||||
p.DeletePreviousVersion = true
|
|
||||||
case 't':
|
|
||||||
p.Tag = true
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("invalid permission: '%v'", r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
47
vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
generated
vendored
47
vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
generated
vendored
|
|
@ -1,47 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
type sectionWriter struct {
|
|
||||||
count int64
|
|
||||||
offset int64
|
|
||||||
position int64
|
|
||||||
writerAt io.WriterAt
|
|
||||||
}
|
|
||||||
|
|
||||||
func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
|
|
||||||
return §ionWriter{
|
|
||||||
count: count,
|
|
||||||
offset: off,
|
|
||||||
writerAt: c,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *sectionWriter) Write(p []byte) (int, error) {
|
|
||||||
remaining := c.count - c.position
|
|
||||||
|
|
||||||
if remaining <= 0 {
|
|
||||||
return 0, errors.New("End of section reached")
|
|
||||||
}
|
|
||||||
|
|
||||||
slice := p
|
|
||||||
|
|
||||||
if int64(len(slice)) > remaining {
|
|
||||||
slice = slice[:remaining]
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
|
|
||||||
c.position += int64(n)
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(p) > n {
|
|
||||||
return n, errors.New("Not enough space for all bytes")
|
|
||||||
}
|
|
||||||
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
198
vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
generated
vendored
198
vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
generated
vendored
|
|
@ -1,198 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
|
|
||||||
|
|
||||||
// ServiceCode values indicate a service failure.
|
|
||||||
const (
|
|
||||||
// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
|
|
||||||
ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"
|
|
||||||
|
|
||||||
// ServiceCodeBlobAlreadyExists means the specified blob already exists.
|
|
||||||
ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"
|
|
||||||
|
|
||||||
// ServiceCodeBlobNotFound means the specified blob does not exist.
|
|
||||||
ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"
|
|
||||||
|
|
||||||
// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
|
|
||||||
ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"
|
|
||||||
|
|
||||||
// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
|
|
||||||
ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"
|
|
||||||
|
|
||||||
// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
|
|
||||||
// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
|
|
||||||
ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"
|
|
||||||
|
|
||||||
// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
|
|
||||||
ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong"
|
|
||||||
|
|
||||||
// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
|
|
||||||
ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier"
|
|
||||||
|
|
||||||
// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
|
|
||||||
// Examine the HTTP status code and message for more information about the failure.
|
|
||||||
ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource"
|
|
||||||
|
|
||||||
// ServiceCodeContainerAlreadyExists means the specified container already exists.
|
|
||||||
ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists"
|
|
||||||
|
|
||||||
// ServiceCodeContainerBeingDeleted means the specified container is being deleted.
|
|
||||||
ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted"
|
|
||||||
|
|
||||||
// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
|
|
||||||
ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled"
|
|
||||||
|
|
||||||
// ServiceCodeContainerNotFound means the specified container does not exist.
|
|
||||||
ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound"
|
|
||||||
|
|
||||||
// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
|
|
||||||
ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit"
|
|
||||||
|
|
||||||
// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
|
|
||||||
ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported"
|
|
||||||
|
|
||||||
// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
|
|
||||||
ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch"
|
|
||||||
|
|
||||||
// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
|
|
||||||
// that the operation for AppendBlob requires at least version 2015-02-21.
|
|
||||||
ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch"
|
|
||||||
|
|
||||||
// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
|
|
||||||
ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"
|
|
||||||
|
|
||||||
// ServiceCodeFeatureEncryptionMismatch means the given customer specified encryption does not match the encryption used to encrypt the blob.
|
|
||||||
ServiceCodeFeatureEncryptionMismatch ServiceCodeType = "BlobCustomerSpecifiedEncryptionMismatch"
|
|
||||||
|
|
||||||
// ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
|
|
||||||
ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
|
|
||||||
|
|
||||||
// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
|
|
||||||
ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"
|
|
||||||
|
|
||||||
// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
|
|
||||||
ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
|
|
||||||
ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
|
|
||||||
ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
|
|
||||||
ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidBlockList means the specified block list is invalid.
|
|
||||||
ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidOperation means an invalid operation against a blob snapshot.
|
|
||||||
ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidPageRange means the page range specified is invalid.
|
|
||||||
ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
|
|
||||||
ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL.
|
|
||||||
ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl"
|
|
||||||
|
|
||||||
// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
|
|
||||||
ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseAlreadyPresent means there is already a lease present.
|
|
||||||
ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
|
|
||||||
ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
|
|
||||||
ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
|
|
||||||
ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
|
|
||||||
ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
|
|
||||||
ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
|
|
||||||
ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
|
|
||||||
ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
|
|
||||||
ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
|
|
||||||
ServiceCodeLeaseLost ServiceCodeType = "LeaseLost"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
|
|
||||||
ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
|
|
||||||
ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation"
|
|
||||||
|
|
||||||
// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
|
|
||||||
ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation"
|
|
||||||
|
|
||||||
// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
|
|
||||||
ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet"
|
|
||||||
|
|
||||||
// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
|
|
||||||
ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation"
|
|
||||||
|
|
||||||
// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
|
|
||||||
ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob"
|
|
||||||
|
|
||||||
// ServiceCodePendingCopyOperation means there is currently a pending copy operation.
|
|
||||||
ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation"
|
|
||||||
|
|
||||||
// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value.
|
|
||||||
ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer"
|
|
||||||
|
|
||||||
// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
|
|
||||||
ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound"
|
|
||||||
|
|
||||||
// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
|
|
||||||
ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported"
|
|
||||||
|
|
||||||
// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
|
|
||||||
ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet"
|
|
||||||
|
|
||||||
// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
|
|
||||||
ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge"
|
|
||||||
|
|
||||||
// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
|
|
||||||
ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded"
|
|
||||||
|
|
||||||
// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
|
|
||||||
ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded"
|
|
||||||
|
|
||||||
// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
|
|
||||||
ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent"
|
|
||||||
|
|
||||||
// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
|
|
||||||
ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet"
|
|
||||||
|
|
||||||
// ServiceCodeSystemInUse means this blob is in use by the system.
|
|
||||||
ServiceCodeSystemInUse ServiceCodeType = "SystemInUse"
|
|
||||||
|
|
||||||
// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
|
|
||||||
ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet"
|
|
||||||
|
|
||||||
// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
|
|
||||||
ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite"
|
|
||||||
|
|
||||||
// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
|
|
||||||
ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated"
|
|
||||||
|
|
||||||
// ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
|
|
||||||
ServiceCodeBlobArchived ServiceCodeType = "BlobArchived"
|
|
||||||
|
|
||||||
// ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
|
|
||||||
ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived"
|
|
||||||
)
|
|
||||||
8
vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go
generated
vendored
8
vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go
generated
vendored
|
|
@ -1,8 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
// StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential
|
|
||||||
type StorageAccountCredential interface {
|
|
||||||
AccountName() string
|
|
||||||
ComputeHMACSHA256(message string) (base64String string)
|
|
||||||
getUDKParams() *UserDelegationKey
|
|
||||||
}
|
|
||||||
158
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
generated
vendored
158
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
generated
vendored
|
|
@ -1,158 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
|
|
||||||
AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB
|
|
||||||
|
|
||||||
// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
|
|
||||||
AppendBlobMaxBlocks = 50000
|
|
||||||
)
|
|
||||||
|
|
||||||
// AppendBlobURL defines a set of operations applicable to append blobs.
|
|
||||||
type AppendBlobURL struct {
|
|
||||||
BlobURL
|
|
||||||
abClient appendBlobClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline.
|
|
||||||
func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL {
|
|
||||||
blobClient := newBlobClient(url, p)
|
|
||||||
abClient := newAppendBlobClient(url, p)
|
|
||||||
return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline.
|
|
||||||
func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL {
|
|
||||||
return NewAppendBlobURL(ab.blobClient.URL(), p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
|
|
||||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
|
||||||
func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
|
|
||||||
p := NewBlobURLParts(ab.URL())
|
|
||||||
p.Snapshot = snapshot
|
|
||||||
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
|
|
||||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
|
||||||
func (ab AppendBlobURL) WithVersionID(versionId string) AppendBlobURL {
|
|
||||||
p := NewBlobURLParts(ab.URL())
|
|
||||||
p.VersionID = versionId
|
|
||||||
return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
|
||||||
return ab.blobClient.GetAccountInfo(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
|
|
||||||
func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*AppendBlobCreateResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
|
|
||||||
blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
|
|
||||||
return ab.abClient.Create(ctx, 0, nil,
|
|
||||||
&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
|
|
||||||
&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
|
|
||||||
cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
|
|
||||||
cpk.EncryptionScope, // CPK-N
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
nil,
|
|
||||||
blobTagsString, // Blob tags
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
|
|
||||||
// This method panics if the stream is not at position 0.
|
|
||||||
// Note that the http client closes the body stream after the request is sent to the service.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
|
|
||||||
func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
|
|
||||||
count, err := validateSeekableStreamAt0AndGetCount(body)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return ab.abClient.AppendBlock(ctx, body, count, nil,
|
|
||||||
transactionalMD5,
|
|
||||||
nil, // CRC
|
|
||||||
ac.LeaseAccessConditions.pointers(),
|
|
||||||
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
|
||||||
cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
|
|
||||||
cpk.EncryptionScope, // CPK-N
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
|
|
||||||
func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockFromURLResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
|
|
||||||
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
|
|
||||||
ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
|
|
||||||
return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
|
|
||||||
transactionalMD5, nil, nil, nil,
|
|
||||||
cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
|
|
||||||
cpk.EncryptionScope, // CPK-N
|
|
||||||
destinationAccessConditions.LeaseAccessConditions.pointers(),
|
|
||||||
ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
type AppendBlobAccessConditions struct {
|
|
||||||
ModifiedAccessConditions
|
|
||||||
LeaseAccessConditions
|
|
||||||
AppendPositionAccessConditions
|
|
||||||
}
|
|
||||||
|
|
||||||
// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
|
|
||||||
type AppendPositionAccessConditions struct {
|
|
||||||
// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
|
|
||||||
// only if the append position is equal to a value.
|
|
||||||
// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
|
|
||||||
// IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value
|
|
||||||
// IfAppendPositionEqual==-1 means IfAppendPositionEqual' header specified with a value of 0
|
|
||||||
IfAppendPositionEqual int64
|
|
||||||
|
|
||||||
// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
|
|
||||||
// only if the append blob's size is less than or equal to a value.
|
|
||||||
// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
|
|
||||||
// IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
|
|
||||||
// IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
|
|
||||||
IfMaxSizeLessThanOrEqual int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
|
||||||
func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
|
|
||||||
var zero int64 // defaults to 0
|
|
||||||
switch ac.IfAppendPositionEqual {
|
|
||||||
case -1:
|
|
||||||
iape = &zero
|
|
||||||
case 0:
|
|
||||||
iape = nil
|
|
||||||
default:
|
|
||||||
iape = &ac.IfAppendPositionEqual
|
|
||||||
}
|
|
||||||
|
|
||||||
switch ac.IfMaxSizeLessThanOrEqual {
|
|
||||||
case -1:
|
|
||||||
imsltoe = &zero
|
|
||||||
case 0:
|
|
||||||
imsltoe = nil
|
|
||||||
default:
|
|
||||||
imsltoe = &ac.IfMaxSizeLessThanOrEqual
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
320
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
generated
vendored
320
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
generated
vendored
|
|
@ -1,320 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
|
|
||||||
type BlobURL struct {
|
|
||||||
blobClient blobClient
|
|
||||||
}
|
|
||||||
|
|
||||||
type BlobTagsMap map[string]string
|
|
||||||
|
|
||||||
var DefaultAccessTier AccessTierType = AccessTierNone
|
|
||||||
var DefaultPremiumBlobAccessTier PremiumPageBlobAccessTierType = PremiumPageBlobAccessTierNone
|
|
||||||
|
|
||||||
// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
|
|
||||||
func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
|
|
||||||
blobClient := newBlobClient(url, p)
|
|
||||||
return BlobURL{blobClient: blobClient}
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL returns the URL endpoint used by the BlobURL object.
|
|
||||||
func (b BlobURL) URL() url.URL {
|
|
||||||
return b.blobClient.URL()
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL as a string.
|
|
||||||
func (b BlobURL) String() string {
|
|
||||||
u := b.URL()
|
|
||||||
return u.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
|
||||||
return b.blobClient.GetAccountInfo(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
|
|
||||||
func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
|
|
||||||
return NewBlobURL(b.blobClient.URL(), p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
|
|
||||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
|
||||||
func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
|
|
||||||
p := NewBlobURLParts(b.URL())
|
|
||||||
p.Snapshot = snapshot
|
|
||||||
return NewBlobURL(p.URL(), b.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithVersionID creates a new BlobURL object identical to the source but with the specified version id.
|
|
||||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
|
||||||
func (b BlobURL) WithVersionID(versionID string) BlobURL {
|
|
||||||
p := NewBlobURLParts(b.URL())
|
|
||||||
p.VersionID = versionID
|
|
||||||
return NewBlobURL(p.URL(), b.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
|
|
||||||
func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
|
|
||||||
return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
|
|
||||||
func (b BlobURL) ToBlockBlobURL() BlockBlobURL {
|
|
||||||
return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
|
|
||||||
func (b BlobURL) ToPageBlobURL() PageBlobURL {
|
|
||||||
return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
|
|
||||||
if blobTagsMap == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
tags := make([]string, 0)
|
|
||||||
for key, val := range blobTagsMap {
|
|
||||||
tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
|
|
||||||
}
|
|
||||||
//tags = tags[:len(tags)-1]
|
|
||||||
blobTagsString := strings.Join(tags, "&")
|
|
||||||
return &blobTagsString
|
|
||||||
}
|
|
||||||
|
|
||||||
func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags {
|
|
||||||
if blobTagsMap == nil {
|
|
||||||
return BlobTags{}
|
|
||||||
}
|
|
||||||
blobTagSet := make([]BlobTag, 0, len(blobTagsMap))
|
|
||||||
for key, val := range blobTagsMap {
|
|
||||||
blobTagSet = append(blobTagSet, BlobTag{Key: key, Value: val})
|
|
||||||
}
|
|
||||||
return BlobTags{BlobTagSet: blobTagSet}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
|
|
||||||
// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
|
|
||||||
// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
|
|
||||||
// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
|
|
||||||
// Therefore it not required to pass these here.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
|
|
||||||
func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool, cpk ClientProvidedKeyOptions) (*DownloadResponse, error) {
|
|
||||||
var xRangeGetContentMD5 *bool
|
|
||||||
if rangeGetContentMD5 {
|
|
||||||
xRangeGetContentMD5 = &rangeGetContentMD5
|
|
||||||
}
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
dr, err := b.blobClient.Download(ctx, nil, nil, nil,
|
|
||||||
httpRange{offset: offset, count: count}.pointers(),
|
|
||||||
ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
|
|
||||||
cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &DownloadResponse{
|
|
||||||
b: b,
|
|
||||||
r: dr,
|
|
||||||
ctx: ctx,
|
|
||||||
getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
|
|
||||||
}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
|
|
||||||
// Note 1: that deleting a blob also deletes all its snapshots.
|
|
||||||
// Note 2: Snapshot/VersionId are optional parameters which are part of request URL query params.
|
|
||||||
// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
|
|
||||||
// Therefore it not required to pass these here.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
|
|
||||||
func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
nil, BlobDeleteNone)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
|
|
||||||
// Each call to this operation replaces all existing tags attached to the blob.
|
|
||||||
// To remove all tags from the blob, call this operation with no tags set.
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
|
|
||||||
func (b BlobURL) SetTags(ctx context.Context, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, ifTags *string, blobTagsMap BlobTagsMap) (*BlobSetTagsResponse, error) {
|
|
||||||
tags := SerializeBlobTags(blobTagsMap)
|
|
||||||
return b.blobClient.SetTags(ctx, nil, nil, transactionalContentMD5, transactionalContentCrc64, nil, ifTags, nil, &tags)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
|
|
||||||
func (b BlobURL) GetTags(ctx context.Context, ifTags *string) (*BlobTags, error) {
|
|
||||||
return b.blobClient.GetTags(ctx, nil, nil, nil, nil, ifTags, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
|
|
||||||
func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
|
|
||||||
return b.blobClient.Undelete(ctx, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetTier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
|
|
||||||
// and on a block blob in a blob storage account (locally redundant storage only).
|
|
||||||
// A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob.
|
|
||||||
// A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag.
|
|
||||||
// Note: VersionId is an optional parameter which is part of request URL query params.
|
|
||||||
// It can be explicitly set by calling WithVersionID(versionID string) function and hence it not required to pass it here.
|
|
||||||
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
|
|
||||||
func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
|
|
||||||
return b.blobClient.SetTier(ctx, tier, nil,
|
|
||||||
nil, // Blob versioning
|
|
||||||
nil, RehydratePriorityNone, nil, lac.pointers(),
|
|
||||||
nil) // Blob ifTags
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetProperties returns the blob's properties.
|
|
||||||
// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
|
|
||||||
// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string)
|
|
||||||
// Therefore it not required to pass these here.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
|
|
||||||
func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobGetPropertiesResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
return b.blobClient.GetProperties(ctx, nil,
|
|
||||||
nil, // Blob versioning
|
|
||||||
nil, ac.LeaseAccessConditions.pointers(),
|
|
||||||
cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHTTPHeaders changes a blob's HTTP headers.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
|
||||||
func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
return b.blobClient.SetHTTPHeaders(ctx, nil,
|
|
||||||
&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
|
|
||||||
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
&h.ContentDisposition, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata changes a blob's metadata, replacing any existing metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobSetMetadataResponse, error) {
	// Expand the caller's access conditions into the optional header pointers the generated client expects.
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
		cpk.EncryptionScope, // CPK-N
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}
|
|
||||||
|
|
||||||
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobCreateSnapshotResponse, error) {
	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
	// because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this
	// performance hit.
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return b.blobClient.CreateSnapshot(ctx, nil, metadata,
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
		cpk.EncryptionScope, // CPK-N
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		ac.LeaseAccessConditions.pointers(), nil)
}
|
|
||||||
|
|
||||||
// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
// 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
	// Expand the access conditions into the optional header pointers the generated client expects.
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}

// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.RenewLease(ctx, leaseID, nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}

// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.ReleaseLease(ctx, leaseID, nil,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}

// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakNaturally (-1)
// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	// leasePeriodPointer maps LeaseBreakNaturally to nil so the break-period header is omitted.
	return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}

// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
	return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
		nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}
|
|
||||||
|
|
||||||
// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
const LeaseBreakNaturally = -1

// leasePeriodPointer converts a lease-break period into the optional pointer the
// generated clients expect: nil means "break naturally" (the header is omitted and
// the service applies its default semantics); any other period is forwarded by address.
//
// BUG FIX: the previous implementation assigned the named result but then executed
// `return nil`, so the requested break period was always discarded and every
// BreakLease call broke the lease naturally.
func leasePeriodPointer(period int32) *int32 {
	if period != LeaseBreakNaturally {
		return &period
	}
	return nil
}
|
|
||||||
|
|
||||||
// StartCopyFromURL copies the data at the source URL to a blob.
// The copy is asynchronous; poll the returned response's copy status to track completion.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobStartCopyFromURLResponse, error) {
	// Source and destination each carry their own conditional-access headers.
	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
	dstLeaseID := dstac.LeaseAccessConditions.pointers()
	// Tags are sent as a single serialized header value.
	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
	return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
		tier, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
		srcIfMatchETag, srcIfNoneMatchETag,
		nil, // source ifTags
		dstIfModifiedSince, dstIfUnmodifiedSince,
		dstIfMatchETag, dstIfNoneMatchETag,
		nil, // Blob ifTags
		dstLeaseID,
		nil,
		blobTagsString, // Blob tags
		nil)
}
|
|
||||||
|
|
||||||
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// The copyID identifies the in-flight copy operation returned by StartCopyFromURL.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) {
	return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
}
|
|
||||||
175
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
generated
vendored
175
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
generated
vendored
|
|
@ -1,175 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Service-imposed size limits for block blob operations.
const (
	// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
	BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB

	// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
	BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4000MiB

	// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
	BlockBlobMaxBlocks = 50000
)
|
|
||||||
|
|
||||||
// BlockBlobURL defines a set of operations applicable to block blobs.
// It embeds BlobURL, so all generic blob operations are available as well.
type BlockBlobURL struct {
	BlobURL
	bbClient blockBlobClient // generated client for block-blob-specific operations
}

// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
	// Both the embedded generic blob client and the block-blob client target the same URL/pipeline.
	blobClient := newBlobClient(url, p)
	bbClient := newBlockBlobClient(url, p)
	return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
}
|
|
||||||
|
|
||||||
// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline.
|
|
||||||
func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
|
|
||||||
return NewBlockBlobURL(bb.blobClient.URL(), p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
|
|
||||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
|
||||||
func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
|
|
||||||
p := NewBlobURLParts(bb.URL())
|
|
||||||
p.Snapshot = snapshot
|
|
||||||
return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithVersionID creates a new BlockBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the version id, returning a URL to the base blob.
func (bb BlockBlobURL) WithVersionID(versionId string) BlockBlobURL {
	p := NewBlobURLParts(bb.URL())
	p.VersionID = versionId
	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
}
|
|
||||||
|
|
||||||
// GetAccountInfo returns the SKU and account kind of the storage account hosting this blob.
func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
	return bb.blobClient.GetAccountInfo(ctx)
}
|
|
||||||
|
|
||||||
// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*BlockBlobUploadResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	// Determine the content length from the seekable stream (also validates position 0).
	count, err := validateSeekableStreamAt0AndGetCount(body)
	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
	if err != nil {
		return nil, err
	}
	return bb.bbClient.Upload(ctx, body, count, nil, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
		cpk.EncryptionScope, // CPK-N
		tier, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil,
		blobTagsString, // Blob tags
	)
}
|
|
||||||
|
|
||||||
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) {
	// Determine the content length from the seekable stream (also validates position 0).
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(),
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
		cpk.EncryptionScope, // CPK-N
		nil)
}
|
|
||||||
|
|
||||||
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockFromURLResponse, error) {
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	// The offset/count pair is converted into an HTTP Range header by httpRange.pointers().
	return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil,
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
		cpk.EncryptionScope, // CPK-N
		destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
|
|
||||||
|
|
||||||
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*BlockBlobCommitBlockListResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
	// Latest means: for each ID, use the most recently staged (uncommitted) block if present, else the committed one.
	return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
		&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil,
		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
		cpk.EncryptionScope, // CPK-N
		tier,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil,
		blobTagsString, // Blob tags
	)
}
|
|
||||||
|
|
||||||
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
	return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(),
		nil, // Blob ifTags
		nil)
}
|
|
||||||
|
|
||||||
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// Unlike StartCopyFromURL, this call does not return until the copy is complete.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobCopyFromURLResponse, error) {
	// Source and destination each carry their own conditional-access headers.
	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
	dstLeaseID := dstac.LeaseAccessConditions.pointers()
	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
	return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, tier,
		srcIfModifiedSince, srcIfUnmodifiedSince,
		srcIfMatchETag, srcIfNoneMatchETag,
		dstIfModifiedSince, dstIfUnmodifiedSince,
		dstIfMatchETag, dstIfNoneMatchETag,
		nil, // Blob ifTags
		dstLeaseID, nil, srcContentMD5,
		blobTagsString, // Blob tags
	)
}
|
|
||||||
|
|
||||||
// PutBlobFromURL synchronously creates a new Block Blob with data from the source URL up to a max length of 256MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob-from-url.
func (bb BlockBlobURL) PutBlobFromURL(ctx context.Context, h BlobHTTPHeaders, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, dstContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*BlockBlobPutBlobFromURLResponse, error) {
	// Source and destination each carry their own conditional-access headers.
	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
	dstLeaseID := dstac.LeaseAccessConditions.pointers()
	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)

	return bb.bbClient.PutBlobFromURL(ctx, 0, source.String(), nil, nil,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, dstContentMD5, &h.CacheControl,
		metadata, dstLeaseID, &h.ContentDisposition, cpk.EncryptionKey, cpk.EncryptionKeySha256,
		cpk.EncryptionAlgorithm, cpk.EncryptionScope, tier, dstIfModifiedSince, dstIfUnmodifiedSince,
		dstIfMatchETag, dstIfNoneMatchETag, nil, srcIfModifiedSince, srcIfUnmodifiedSince,
		srcIfMatchETag, srcIfNoneMatchETag, nil, nil, srcContentMD5, blobTagsString, nil)
}
|
|
||||||
307
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
generated
vendored
307
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go
generated
vendored
|
|
@ -1,307 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerURL struct {
	client containerClient // generated client bound to this container's URL and pipeline
}

// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline.
func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL {
	client := newContainerClient(url, p)
	return ContainerURL{client: client}
}
|
|
||||||
|
|
||||||
// URL returns the URL endpoint used by the ContainerURL object.
func (c ContainerURL) URL() url.URL {
	return c.client.URL()
}

// String returns the URL as a string.
func (c ContainerURL) String() string {
	u := c.URL()
	return u.String()
}

// GetAccountInfo returns the SKU and account kind of the storage account hosting this container.
func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) {
	return c.client.GetAccountInfo(ctx)
}

// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline.
func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL {
	return NewContainerURL(c.URL(), p)
}
|
|
||||||
|
|
||||||
// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's
// NewBlobURL method.
func (c ContainerURL) NewBlobURL(blobName string) BlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewBlobURL(blobURL, c.client.Pipeline())
}

// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's
// NewAppendBlobURL method.
func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewAppendBlobURL(blobURL, c.client.Pipeline())
}

// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's
// NewBlockBlobURL method.
func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewBlockBlobURL(blobURL, c.client.Pipeline())
}

// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of
// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's
// NewPageBlobURL method.
func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL {
	blobURL := appendToURLPath(c.URL(), blobName)
	return NewPageBlobURL(blobURL, c.client.Pipeline())
}
|
|
||||||
|
|
||||||
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
	return c.client.Create(ctx, nil, metadata, publicAccessType, nil,
		nil, nil, // container encryption
	)
}
|
|
||||||
|
|
||||||
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) {
	// The service ignores ETag conditions on container deletion; reject them up front rather than silently dropping them.
	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
	}

	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, nil)
}
|
|
||||||
|
|
||||||
// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) {
	// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
	// This allows us to not expose a GetProperties method at all simplifying the API.
	return c.client.GetProperties(ctx, nil, ac.pointers(), nil)
}
|
|
||||||
|
|
||||||
// SetMetadata sets the container's metadata, replacing any existing metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) {
	// Only IfModifiedSince is honored by the service for this operation; reject the others up front.
	if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service")
	}
	ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil)
}
|
|
||||||
|
|
||||||
// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) {
	return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil)
}
|
|
||||||
|
|
||||||
// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
	Read, Add, Create, Write, Delete, List bool
}

// String produces the access policy permission string for an Azure Storage container.
// Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string {
	// Emit set flags in the fixed order the service expects: r, a, c, w, d, l.
	var buf bytes.Buffer
	for _, flag := range []struct {
		set bool
		c   rune
	}{
		{p.Read, 'r'},
		{p.Add, 'a'},
		{p.Create, 'c'},
		{p.Write, 'w'},
		{p.Delete, 'd'},
		{p.List, 'l'},
	} {
		if flag.set {
			buf.WriteRune(flag.c)
		}
	}
	return buf.String()
}

// Parse initializes the AccessPolicyPermission's fields from a string.
// An unrecognized character yields an error; flags seen before it remain set.
func (p *AccessPolicyPermission) Parse(s string) error {
	*p = AccessPolicyPermission{} // reset all flags before parsing
	// Map each permission character onto the field it controls.
	fields := map[rune]*bool{
		'r': &p.Read,
		'a': &p.Add,
		'c': &p.Create,
		'w': &p.Write,
		'd': &p.Delete,
		'l': &p.List,
	}
	for _, r := range s {
		target, ok := fields[r]
		if !ok {
			return fmt.Errorf("invalid permission: '%v'", r)
		}
		*target = true
	}
	return nil
}
|
|
||||||
|
|
||||||
// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier,
	ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) {
	// ETag conditions are ignored by the service for this operation; reject them up front.
	if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone {
		return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service")
	}
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers()
	return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(),
		accessType, ifModifiedSince, ifUnmodifiedSince, nil)
}
|
|
||||||
|
|
||||||
// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) {
	// Container leases only honor the modified-since conditions; ETag pointers are discarded.
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.AcquireLease(ctx, nil, &duration, &proposedID,
		ifModifiedSince, ifUnmodifiedSince, nil)
}

// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}

// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	// leasePeriodPointer maps LeaseBreakNaturally to nil so the break-period header is omitted.
	return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
}

// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
}
|
|
||||||
|
|
||||||
// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
|
|
||||||
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
|
|
||||||
// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the the
|
|
||||||
// previously-returned Marker) to get the next segment.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
|
||||||
func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
|
|
||||||
prefix, include, maxResults := o.pointers()
|
|
||||||
return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
|
|
||||||
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
|
|
||||||
// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the the
|
|
||||||
// previously-returned Marker) to get the next segment.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
|
|
||||||
func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
|
|
||||||
if o.Details.Snapshots {
|
|
||||||
return nil, errors.New("snapshots are not supported in this listing operation")
|
|
||||||
}
|
|
||||||
prefix, include, maxResults := o.pointers()
|
|
||||||
return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListBlobsSegmentOptions defines options available when calling ListBlobs.
type ListBlobsSegmentOptions struct {
	Details BlobListingDetails // No IncludeType header is produced if ""
	Prefix  string             // No Prefix header is produced if ""

	// SetMaxResults sets the maximum desired results you want the service to return. Note, the
	// service may return fewer results than requested.
	// MaxResults=0 means no 'MaxResults' header specified.
	MaxResults int32
}
|
|
||||||
|
|
||||||
func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
|
|
||||||
if o.Prefix != "" {
|
|
||||||
prefix = &o.Prefix
|
|
||||||
}
|
|
||||||
include = o.Details.slice()
|
|
||||||
if o.MaxResults != 0 {
|
|
||||||
maxResults = &o.MaxResults
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// BlobListingDetails indicates what additional information the service should return with each blob.
// Each true field adds the corresponding "include" item to the listing request.
type BlobListingDetails struct {
	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions bool
}
|
|
||||||
|
|
||||||
// string produces the Include query parameter's value.
|
|
||||||
func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
|
|
||||||
items := []ListBlobsIncludeItemType{}
|
|
||||||
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
|
|
||||||
if d.Copy {
|
|
||||||
items = append(items, ListBlobsIncludeItemCopy)
|
|
||||||
}
|
|
||||||
if d.Deleted {
|
|
||||||
items = append(items, ListBlobsIncludeItemDeleted)
|
|
||||||
}
|
|
||||||
if d.Metadata {
|
|
||||||
items = append(items, ListBlobsIncludeItemMetadata)
|
|
||||||
}
|
|
||||||
if d.Snapshots {
|
|
||||||
items = append(items, ListBlobsIncludeItemSnapshots)
|
|
||||||
}
|
|
||||||
if d.UncommittedBlobs {
|
|
||||||
items = append(items, ListBlobsIncludeItemUncommittedblobs)
|
|
||||||
}
|
|
||||||
if d.Tags {
|
|
||||||
items = append(items, ListBlobsIncludeItemTags)
|
|
||||||
}
|
|
||||||
if d.Versions {
|
|
||||||
items = append(items, ListBlobsIncludeItemVersions)
|
|
||||||
}
|
|
||||||
return items
|
|
||||||
}
|
|
||||||
273
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
generated
vendored
273
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
generated
vendored
|
|
@ -1,273 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Page-blob size constraints imposed by the service.
const (
	// PageBlobPageBytes indicates the number of bytes in a page (512).
	PageBlobPageBytes = 512

	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage.
	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
)
|
|
||||||
|
|
||||||
// PageBlobURL defines a set of operations applicable to page blobs.
// It embeds BlobURL for the operations common to all blob types and adds a
// generated page-blob client for the page-specific ones.
type PageBlobURL struct {
	BlobURL
	pbClient pageBlobClient
}
|
|
||||||
|
|
||||||
// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline.
|
|
||||||
func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL {
|
|
||||||
blobClient := newBlobClient(url, p)
|
|
||||||
pbClient := newPageBlobClient(url, p)
|
|
||||||
return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline.
|
|
||||||
func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL {
|
|
||||||
return NewPageBlobURL(pb.blobClient.URL(), p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
|
|
||||||
// Pass "" to remove the snapshot returning a URL to the base blob.
|
|
||||||
func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
|
|
||||||
p := NewBlobURLParts(pb.URL())
|
|
||||||
p.Snapshot = snapshot
|
|
||||||
return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version ID.
// Pass "" to remove the version ID returning a URL to the base blob.
func (pb PageBlobURL) WithVersionID(versionId string) PageBlobURL {
	p := NewBlobURLParts(pb.URL())
	p.VersionID = versionId
	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
}
|
|
||||||
|
|
||||||
func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
|
|
||||||
return pb.blobClient.GetAccountInfo(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier PremiumPageBlobAccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*PageBlobCreateResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
	// Positional call into the generated client; argument order must match the
	// generated signature exactly. NOTE(review): the leading 0 is presumably the
	// (empty) body content length — confirm against the generated pageBlobClient.
	return pb.pbClient.Create(ctx, 0, size, nil, tier,
		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
		cpk.EncryptionScope, // CPK-N
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob tags
		&sequenceNumber, nil,
		blobTagsString, // Blob tags
	)
}
|
|
||||||
|
|
||||||
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesResponse, error) {
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return nil, err
	}
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
	// The page range is inclusive: [offset, offset+count-1].
	return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
		PageRange{Start: offset, End: offset + count - 1}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
		cpk.EncryptionScope, // CPK-N
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}
|
|
||||||
|
|
||||||
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of source data to copy from.
// The destOffset specifies the start offset of data in page blob will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesFromURLResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers()
	// Source and destination ranges are both inclusive and count bytes long;
	// PageRange.pointers() renders each as a "bytes=start-end" header value.
	return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0,
		*PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil,
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
		cpk.EncryptionScope, // CPK-N
		destinationAccessConditions.LeaseAccessConditions.pointers(),
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil)
}
|
|
||||||
|
|
||||||
// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobClearPagesResponse, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
	// Positional call into the generated client; the cleared range is inclusive
	// [offset, offset+count-1]. NOTE(review): the leading 0 is presumably the
	// (empty) body content length — confirm against the generated pageBlobClient.
	return pb.pbClient.ClearPages(ctx, 0, nil,
		PageRange{Start: offset, End: offset + count - 1}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
		cpk.EncryptionScope, // CPK-N
		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan,
		ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil)
}
|
|
||||||
|
|
||||||
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
|
|
||||||
func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
return pb.pbClient.GetPageRanges(ctx, nil, nil,
|
|
||||||
httpRange{offset: offset, count: count}.pointers(),
|
|
||||||
ac.LeaseAccessConditions.pointers(),
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
|
|
||||||
nil, // Blob ifTags
|
|
||||||
nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob representing managed disk.
// Either prevSnapshot (a snapshot timestamp) or prevSnapshotURL (a managed-disk snapshot URL) identifies the baseline to diff against.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()

	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot,
		prevSnapshotURL, // Get managed disk diff
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}
|
|
||||||
|
|
||||||
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) {
	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
	return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot,
		nil, // Get managed disk diff
		httpRange{offset: offset, count: count}.pointers(),
		ac.LeaseAccessConditions.pointers(),
		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
		nil, // Blob ifTags
		nil)
}
|
|
||||||
|
|
||||||
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
|
|
||||||
func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobResizeResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(),
|
|
||||||
cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
|
|
||||||
cpk.EncryptionScope, // CPK-N
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateSequenceNumber sets the page blob's sequence number.
|
|
||||||
func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64,
|
|
||||||
ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) {
|
|
||||||
sn := &sequenceNumber
|
|
||||||
if action == SequenceNumberActionIncrement {
|
|
||||||
sn = nil
|
|
||||||
}
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
|
|
||||||
return pb.pbClient.UpdateSequenceNumber(ctx, action, nil,
|
|
||||||
ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
|
|
||||||
nil, sn, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
|
|
||||||
// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination.
|
|
||||||
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
|
|
||||||
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
|
|
||||||
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
|
|
||||||
func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) {
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
|
|
||||||
qp := source.Query()
|
|
||||||
qp.Set("snapshot", snapshot)
|
|
||||||
source.RawQuery = qp.Encode()
|
|
||||||
return pb.pbClient.CopyIncremental(ctx, source.String(), nil,
|
|
||||||
ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pr PageRange) pointers() *string {
|
|
||||||
endOffset := strconv.FormatInt(int64(pr.End), 10)
|
|
||||||
asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset)
|
|
||||||
return &asString
|
|
||||||
}
|
|
||||||
|
|
||||||
// PageBlobAccessConditions bundles every access-condition category that a
// page-blob operation can carry: modified-time/ETag, lease, and sequence-number.
type PageBlobAccessConditions struct {
	ModifiedAccessConditions
	LeaseAccessConditions
	SequenceNumberAccessConditions
}
|
|
||||||
|
|
||||||
// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set.
// All three fields use the same sentinel scheme: 0 = header omitted, -1 = header sent with value 0,
// any positive value = header sent as-is (see pointers()).
type SequenceNumberAccessConditions struct {
	// IfSequenceNumberLessThan ensures that the page blob operation succeeds
	// only if the blob's sequence number is less than a value.
	// IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified.
	// IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value
	// IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0
	IfSequenceNumberLessThan int64

	// IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds
	// only if the blob's sequence number is less than or equal to a value.
	// IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified.
	// IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value
	// IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0
	IfSequenceNumberLessThanOrEqual int64

	// IfSequenceNumberEqual ensures that the page blob operation succeeds
	// only if the blob's sequence number is equal to a value.
	// IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified.
	// IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value
	// IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0
	IfSequenceNumberEqual int64
}
|
|
||||||
|
|
||||||
// pointers is for internal infrastructure. It returns the fields as pointers.
|
|
||||||
func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) {
|
|
||||||
var zero int64 // Defaults to 0
|
|
||||||
switch ac.IfSequenceNumberLessThan {
|
|
||||||
case -1:
|
|
||||||
snlt = &zero
|
|
||||||
case 0:
|
|
||||||
snlt = nil
|
|
||||||
default:
|
|
||||||
snlt = &ac.IfSequenceNumberLessThan
|
|
||||||
}
|
|
||||||
|
|
||||||
switch ac.IfSequenceNumberLessThanOrEqual {
|
|
||||||
case -1:
|
|
||||||
snltoe = &zero
|
|
||||||
case 0:
|
|
||||||
snltoe = nil
|
|
||||||
default:
|
|
||||||
snltoe = &ac.IfSequenceNumberLessThanOrEqual
|
|
||||||
}
|
|
||||||
switch ac.IfSequenceNumberEqual {
|
|
||||||
case -1:
|
|
||||||
sne = &zero
|
|
||||||
case 0:
|
|
||||||
sne = nil
|
|
||||||
default:
|
|
||||||
sne = &ac.IfSequenceNumberEqual
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
174
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
generated
vendored
174
vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go
generated
vendored
|
|
@ -1,174 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Reserved container names defined by the Azure Storage service.
const (
	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
	ContainerNameRoot = "$root"

	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
	ContainerNameLogs = "$logs"
)
|
|
||||||
|
|
||||||
// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers.
// It wraps the generated account-level service client.
type ServiceURL struct {
	client serviceClient
}
|
|
||||||
|
|
||||||
// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline.
|
|
||||||
func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL {
|
|
||||||
client := newServiceClient(primaryURL, p)
|
|
||||||
return ServiceURL{client: client}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object.
// OAuth is required for this call, as well as any role that can delegate access to the storage account.
func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) {
	// NOTE(review): sc is rebuilt from s.client's own url/pipeline and looks redundant —
	// s.client could presumably be used directly; verify before changing.
	sc := newServiceClient(s.client.url, s.client.p)
	udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID)
	if err != nil {
		return UserDelegationCredential{}, err
	}
	// The account name is taken as the first dot-separated label of the host
	// (e.g. "acct" in acct.blob.core.windows.net).
	return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil
}
|
|
||||||
|
|
||||||
//TODO this was supposed to be generated
|
|
||||||
//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion
|
|
||||||
func NewKeyInfo(Start, Expiry time.Time) KeyInfo {
|
|
||||||
return KeyInfo{
|
|
||||||
Start: Start.UTC().Format(SASTimeFormat),
|
|
||||||
Expiry: Expiry.UTC().Format(SASTimeFormat),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) {
|
|
||||||
return s.client.GetAccountInfo(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// URL returns the URL endpoint used by the ServiceURL object.
|
|
||||||
func (s ServiceURL) URL() url.URL {
|
|
||||||
return s.client.URL()
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the URL as a string.
|
|
||||||
func (s ServiceURL) String() string {
|
|
||||||
u := s.URL()
|
|
||||||
return u.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline.
|
|
||||||
func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL {
|
|
||||||
return NewServiceURL(s.URL(), p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of
|
|
||||||
// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL.
|
|
||||||
// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
|
|
||||||
// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
|
|
||||||
// NewContainerURL method.
|
|
||||||
func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
|
|
||||||
containerURL := appendToURLPath(s.URL(), containerName)
|
|
||||||
return NewContainerURL(containerURL, s.client.Pipeline())
|
|
||||||
}
|
|
||||||
|
|
||||||
// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
|
|
||||||
func appendToURLPath(u url.URL, name string) url.URL {
|
|
||||||
// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
|
|
||||||
// When you call url.Parse() this is what you'll get:
|
|
||||||
// Scheme: "https"
|
|
||||||
// Opaque: ""
|
|
||||||
// User: nil
|
|
||||||
// Host: "ms.com"
|
|
||||||
// Path: "/a/b/" This should start with a / and it might or might not have a trailing slash
|
|
||||||
// RawPath: ""
|
|
||||||
// ForceQuery: false
|
|
||||||
// RawQuery: "k1=v1&k2=v2"
|
|
||||||
// Fragment: "f"
|
|
||||||
if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
|
|
||||||
u.Path += "/" // Append "/" to end before appending name
|
|
||||||
}
|
|
||||||
u.Path += name
|
|
||||||
return u
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListContainersFlatSegment returns a single segment of containers starting from the specified Marker. Use an empty
|
|
||||||
// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
|
|
||||||
// After getting a segment, process it, and then call ListContainersFlatSegment again (passing the the
|
|
||||||
// previously-returned Marker) to get the next segment. For more information, see
|
|
||||||
// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
|
|
||||||
func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
|
|
||||||
prefix, include, maxResults := o.pointers()
|
|
||||||
return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
type ListContainersSegmentOptions struct {
	Detail     ListContainersDetail // No IncludeType header is produced if ""
	Prefix     string               // No Prefix header is produced if ""
	MaxResults int32                // 0 means unspecified
	// TODO: update swagger to generate this type?
}
|
|
||||||
|
|
||||||
func (o *ListContainersSegmentOptions) pointers() (prefix *string, include []ListContainersIncludeType, maxResults *int32) {
|
|
||||||
if o.Prefix != "" {
|
|
||||||
prefix = &o.Prefix
|
|
||||||
}
|
|
||||||
if o.MaxResults != 0 {
|
|
||||||
maxResults = &o.MaxResults
|
|
||||||
}
|
|
||||||
include = []ListContainersIncludeType{ListContainersIncludeType(o.Detail.string())}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListContainersDetail indicates what additional information the service should return with each container.
type ListContainersDetail struct {
	// Tells the service whether to return metadata for each container.
	Metadata bool

	// Show containers that have been deleted when the soft-delete feature is enabled.
	// Deleted bool
}
|
|
||||||
|
|
||||||
// string produces the Include query parameter's value.
|
|
||||||
func (d *ListContainersDetail) string() string {
|
|
||||||
items := make([]string, 0, 2)
|
|
||||||
// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
|
|
||||||
if d.Metadata {
|
|
||||||
items = append(items, string(ListContainersIncludeMetadata))
|
|
||||||
}
|
|
||||||
// if d.Deleted {
|
|
||||||
// items = append(items, string(ListContainersIncludeDeleted))
|
|
||||||
// }
|
|
||||||
if len(items) > 0 {
|
|
||||||
return strings.Join(items, ",")
|
|
||||||
}
|
|
||||||
return string(ListContainersIncludeNone)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
|
|
||||||
return bsu.client.GetProperties(ctx, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
|
|
||||||
return bsu.client.SetProperties(ctx, properties, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
|
|
||||||
return bsu.client.GetStatistics(ctx, nil, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
|
|
||||||
// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
|
|
||||||
// eg. "dog='germanshepherd' and penguin='emperorpenguin'"
|
|
||||||
// To specify a container, eg. "@container=’containerName’ and Name = ‘C’"
|
|
||||||
func (bsu ServiceURL) FindBlobsByTags(ctx context.Context, timeout *int32, requestID *string, where *string, marker Marker, maxResults *int32) (*FilterBlobSegment, error) {
|
|
||||||
return bsu.client.FilterBlobs(ctx, timeout, requestID, where, marker.Val, maxResults)
|
|
||||||
}
|
|
||||||
38
vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
generated
vendored
38
vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
generated
vendored
|
|
@ -1,38 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it
|
|
||||||
func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential {
|
|
||||||
return UserDelegationCredential{
|
|
||||||
accountName: accountName,
|
|
||||||
accountKey: key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type UserDelegationCredential struct {
|
|
||||||
accountName string
|
|
||||||
accountKey UserDelegationKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccountName returns the Storage account's name
|
|
||||||
func (f UserDelegationCredential) AccountName() string {
|
|
||||||
return f.accountName
|
|
||||||
}
|
|
||||||
|
|
||||||
// ComputeHMAC
|
|
||||||
func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) {
|
|
||||||
bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value)
|
|
||||||
h := hmac.New(sha256.New, bytes)
|
|
||||||
h.Write([]byte(message))
|
|
||||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Private method to return important parameters for NewSASQueryParameters
|
|
||||||
func (f UserDelegationCredential) getUDKParams() *UserDelegationKey {
|
|
||||||
return &f.accountKey
|
|
||||||
}
|
|
||||||
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
generated
vendored
3
vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
generated
vendored
|
|
@ -1,3 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
const serviceLibVersion = "0.14"
|
|
||||||
55
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
generated
vendored
55
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
generated
vendored
|
|
@ -1,55 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Credential represent any credential type; it is used to create a credential policy Factory.
|
|
||||||
type Credential interface {
|
|
||||||
pipeline.Factory
|
|
||||||
credentialMarker()
|
|
||||||
}
|
|
||||||
|
|
||||||
type credentialFunc pipeline.FactoryFunc
|
|
||||||
|
|
||||||
func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
|
||||||
return f(next, po)
|
|
||||||
}
|
|
||||||
|
|
||||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
|
||||||
func (credentialFunc) credentialMarker() {}
|
|
||||||
|
|
||||||
//////////////////////////////
|
|
||||||
|
|
||||||
// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resource
|
|
||||||
// or for use with Shared Access Signatures (SAS).
|
|
||||||
func NewAnonymousCredential() Credential {
|
|
||||||
return anonymousCredentialFactory
|
|
||||||
}
|
|
||||||
|
|
||||||
var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
|
|
||||||
|
|
||||||
// anonymousCredentialPolicyFactory is the credential's policy factory.
|
|
||||||
type anonymousCredentialPolicyFactory struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a credential policy object.
|
|
||||||
func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
|
||||||
return &anonymousCredentialPolicy{next: next}
|
|
||||||
}
|
|
||||||
|
|
||||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
|
||||||
func (*anonymousCredentialPolicyFactory) credentialMarker() {}
|
|
||||||
|
|
||||||
// anonymousCredentialPolicy is the credential's policy object.
|
|
||||||
type anonymousCredentialPolicy struct {
|
|
||||||
next pipeline.Policy
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do implements the credential's policy interface.
|
|
||||||
func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
|
||||||
// For anonymous credentials, this is effectively a no-op
|
|
||||||
return p.next.Do(ctx, request)
|
|
||||||
}
|
|
||||||
205
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
generated
vendored
205
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
generated
vendored
|
|
@ -1,205 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
|
|
||||||
// storage account's name and either its primary or secondary key.
|
|
||||||
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
|
|
||||||
bytes, err := base64.StdEncoding.DecodeString(accountKey)
|
|
||||||
if err != nil {
|
|
||||||
return &SharedKeyCredential{}, err
|
|
||||||
}
|
|
||||||
return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SharedKeyCredential contains an account's name and its primary or secondary key.
|
|
||||||
// It is immutable making it shareable and goroutine-safe.
|
|
||||||
type SharedKeyCredential struct {
|
|
||||||
// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
|
|
||||||
accountName string
|
|
||||||
accountKey []byte
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccountName returns the Storage account's name.
|
|
||||||
func (f SharedKeyCredential) AccountName() string {
|
|
||||||
return f.accountName
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f SharedKeyCredential) getAccountKey() []byte {
|
|
||||||
return f.accountKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// noop function to satisfy StorageAccountCredential interface
|
|
||||||
func (f SharedKeyCredential) getUDKParams() *UserDelegationKey {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a credential policy object.
|
|
||||||
func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
|
||||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
|
||||||
// Add a x-ms-date header if it doesn't already exist
|
|
||||||
if d := request.Header.Get(headerXmsDate); d == "" {
|
|
||||||
request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)}
|
|
||||||
}
|
|
||||||
stringToSign, err := f.buildStringToSign(request)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
signature := f.ComputeHMACSHA256(stringToSign)
|
|
||||||
authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "")
|
|
||||||
request.Header[headerAuthorization] = []string{authHeader}
|
|
||||||
|
|
||||||
response, err := next.Do(ctx, request)
|
|
||||||
if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden {
|
|
||||||
// Service failed to authenticate request, log it
|
|
||||||
po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
|
|
||||||
}
|
|
||||||
return response, err
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
|
||||||
func (*SharedKeyCredential) credentialMarker() {}
|
|
||||||
|
|
||||||
// Constants ensuring that header names are correctly spelled and consistently cased.
|
|
||||||
const (
|
|
||||||
headerAuthorization = "Authorization"
|
|
||||||
headerCacheControl = "Cache-Control"
|
|
||||||
headerContentEncoding = "Content-Encoding"
|
|
||||||
headerContentDisposition = "Content-Disposition"
|
|
||||||
headerContentLanguage = "Content-Language"
|
|
||||||
headerContentLength = "Content-Length"
|
|
||||||
headerContentMD5 = "Content-MD5"
|
|
||||||
headerContentType = "Content-Type"
|
|
||||||
headerDate = "Date"
|
|
||||||
headerIfMatch = "If-Match"
|
|
||||||
headerIfModifiedSince = "If-Modified-Since"
|
|
||||||
headerIfNoneMatch = "If-None-Match"
|
|
||||||
headerIfUnmodifiedSince = "If-Unmodified-Since"
|
|
||||||
headerRange = "Range"
|
|
||||||
headerUserAgent = "User-Agent"
|
|
||||||
headerXmsDate = "x-ms-date"
|
|
||||||
headerXmsVersion = "x-ms-version"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
|
|
||||||
func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) {
|
|
||||||
h := hmac.New(sha256.New, f.accountKey)
|
|
||||||
h.Write([]byte(message))
|
|
||||||
return base64.StdEncoding.EncodeToString(h.Sum(nil))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) {
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
|
||||||
headers := request.Header
|
|
||||||
contentLength := headers.Get(headerContentLength)
|
|
||||||
if contentLength == "0" {
|
|
||||||
contentLength = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
canonicalizedResource, err := f.buildCanonicalizedResource(request.URL)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
stringToSign := strings.Join([]string{
|
|
||||||
request.Method,
|
|
||||||
headers.Get(headerContentEncoding),
|
|
||||||
headers.Get(headerContentLanguage),
|
|
||||||
contentLength,
|
|
||||||
headers.Get(headerContentMD5),
|
|
||||||
headers.Get(headerContentType),
|
|
||||||
"", // Empty date because x-ms-date is expected (as per web page above)
|
|
||||||
headers.Get(headerIfModifiedSince),
|
|
||||||
headers.Get(headerIfMatch),
|
|
||||||
headers.Get(headerIfNoneMatch),
|
|
||||||
headers.Get(headerIfUnmodifiedSince),
|
|
||||||
headers.Get(headerRange),
|
|
||||||
buildCanonicalizedHeader(headers),
|
|
||||||
canonicalizedResource,
|
|
||||||
}, "\n")
|
|
||||||
return stringToSign, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildCanonicalizedHeader(headers http.Header) string {
|
|
||||||
cm := map[string][]string{}
|
|
||||||
for k, v := range headers {
|
|
||||||
headerName := strings.TrimSpace(strings.ToLower(k))
|
|
||||||
if strings.HasPrefix(headerName, "x-ms-") {
|
|
||||||
cm[headerName] = v // NOTE: the value must not have any whitespace around it.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(cm) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
keys := make([]string, 0, len(cm))
|
|
||||||
for key := range cm {
|
|
||||||
keys = append(keys, key)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
ch := bytes.NewBufferString("")
|
|
||||||
for i, key := range keys {
|
|
||||||
if i > 0 {
|
|
||||||
ch.WriteRune('\n')
|
|
||||||
}
|
|
||||||
ch.WriteString(key)
|
|
||||||
ch.WriteRune(':')
|
|
||||||
ch.WriteString(strings.Join(cm[key], ","))
|
|
||||||
}
|
|
||||||
return string(ch.Bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) {
|
|
||||||
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
|
|
||||||
cr := bytes.NewBufferString("/")
|
|
||||||
cr.WriteString(f.accountName)
|
|
||||||
|
|
||||||
if len(u.Path) > 0 {
|
|
||||||
// Any portion of the CanonicalizedResource string that is derived from
|
|
||||||
// the resource's URI should be encoded exactly as it is in the URI.
|
|
||||||
// -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx
|
|
||||||
cr.WriteString(u.EscapedPath())
|
|
||||||
} else {
|
|
||||||
// a slash is required to indicate the root path
|
|
||||||
cr.WriteString("/")
|
|
||||||
}
|
|
||||||
|
|
||||||
// params is a map[string][]string; param name is key; params values is []string
|
|
||||||
params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(params) > 0 { // There is at least 1 query parameter
|
|
||||||
paramNames := []string{} // We use this to sort the parameter key names
|
|
||||||
for paramName := range params {
|
|
||||||
paramNames = append(paramNames, paramName) // paramNames must be lowercase
|
|
||||||
}
|
|
||||||
sort.Strings(paramNames)
|
|
||||||
|
|
||||||
for _, paramName := range paramNames {
|
|
||||||
paramValues := params[paramName]
|
|
||||||
sort.Strings(paramValues)
|
|
||||||
|
|
||||||
// Join the sorted key values separated by ','
|
|
||||||
// Then prepend "keyName:"; then add this string to the buffer
|
|
||||||
cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ","))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(cr.Bytes()), nil
|
|
||||||
}
|
|
||||||
137
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go
generated
vendored
137
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go
generated
vendored
|
|
@ -1,137 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"sync/atomic"
|
|
||||||
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TokenRefresher represents a callback method that you write; this method is called periodically
|
|
||||||
// so you can refresh the token credential's value.
|
|
||||||
type TokenRefresher func(credential TokenCredential) time.Duration
|
|
||||||
|
|
||||||
// TokenCredential represents a token credential (which is also a pipeline.Factory).
|
|
||||||
type TokenCredential interface {
|
|
||||||
Credential
|
|
||||||
Token() string
|
|
||||||
SetToken(newToken string)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage
|
|
||||||
// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for
|
|
||||||
// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the
|
|
||||||
// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration
|
|
||||||
// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again.
|
|
||||||
// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your
|
|
||||||
// TokenCredential object from ever invoking tokenRefresher again. Also, oen way to deal with failing to refresh a
|
|
||||||
// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline.
|
|
||||||
func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
|
|
||||||
tc := &tokenCredential{}
|
|
||||||
tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
|
|
||||||
if tokenRefresher == nil {
|
|
||||||
return tc // If no callback specified, return the simple tokenCredential
|
|
||||||
}
|
|
||||||
|
|
||||||
tcwr := &tokenCredentialWithRefresh{token: tc}
|
|
||||||
tcwr.token.startRefresh(tokenRefresher)
|
|
||||||
runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) {
|
|
||||||
deadTC.token.stopRefresh()
|
|
||||||
deadTC.token = nil // Sanity (not really required)
|
|
||||||
})
|
|
||||||
return tcwr
|
|
||||||
}
|
|
||||||
|
|
||||||
// tokenCredentialWithRefresh is a wrapper over a token credential.
|
|
||||||
// When this wrapper object gets GC'd, it stops the tokenCredential's timer
|
|
||||||
// which allows the tokenCredential object to also be GC'd.
|
|
||||||
type tokenCredentialWithRefresh struct {
|
|
||||||
token *tokenCredential
|
|
||||||
}
|
|
||||||
|
|
||||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
|
||||||
func (*tokenCredentialWithRefresh) credentialMarker() {}
|
|
||||||
|
|
||||||
// Token returns the current token value
|
|
||||||
func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() }
|
|
||||||
|
|
||||||
// SetToken changes the current token value
|
|
||||||
func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) }
|
|
||||||
|
|
||||||
// New satisfies pipeline.Factory's New method creating a pipeline policy object.
|
|
||||||
func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
|
||||||
return f.token.New(next, po)
|
|
||||||
}
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
|
||||||
|
|
||||||
// tokenCredential is a pipeline.Factory is the credential's policy factory.
|
|
||||||
type tokenCredential struct {
|
|
||||||
token atomic.Value
|
|
||||||
|
|
||||||
// The members below are only used if the user specified a tokenRefresher callback function.
|
|
||||||
timer *time.Timer
|
|
||||||
tokenRefresher TokenRefresher
|
|
||||||
lock sync.Mutex
|
|
||||||
stopped bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
|
|
||||||
func (*tokenCredential) credentialMarker() {}
|
|
||||||
|
|
||||||
// Token returns the current token value
|
|
||||||
func (f *tokenCredential) Token() string { return f.token.Load().(string) }
|
|
||||||
|
|
||||||
// SetToken changes the current token value
|
|
||||||
func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
|
|
||||||
|
|
||||||
// startRefresh calls refresh which immediately calls tokenRefresher
|
|
||||||
// and then starts a timer to call tokenRefresher in the future.
|
|
||||||
func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
|
|
||||||
f.tokenRefresher = tokenRefresher
|
|
||||||
f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
|
|
||||||
f.refresh()
|
|
||||||
}
|
|
||||||
|
|
||||||
// refresh calls the user's tokenRefresher so they can refresh the token (by
|
|
||||||
// calling SetToken) and then starts another time (based on the returned duration)
|
|
||||||
// in order to refresh the token again in the future.
|
|
||||||
func (f *tokenCredential) refresh() {
|
|
||||||
d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
|
|
||||||
if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
|
|
||||||
f.lock.Lock()
|
|
||||||
if !f.stopped {
|
|
||||||
f.timer = time.AfterFunc(d, f.refresh)
|
|
||||||
}
|
|
||||||
f.lock.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// stopRefresh stops any pending timer and sets stopped field to true to prevent
|
|
||||||
// any new timer from starting.
|
|
||||||
// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
|
|
||||||
func (f *tokenCredential) stopRefresh() {
|
|
||||||
f.lock.Lock()
|
|
||||||
f.stopped = true
|
|
||||||
if f.timer != nil {
|
|
||||||
f.timer.Stop()
|
|
||||||
}
|
|
||||||
f.lock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// New satisfies pipeline.Factory's New method creating a pipeline policy object.
|
|
||||||
func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
|
|
||||||
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
|
||||||
if request.URL.Scheme != "https" {
|
|
||||||
// HTTPS must be used, otherwise the tokens are at the risk of being exposed
|
|
||||||
return nil, errors.New("token credentials require a URL using the https protocol scheme")
|
|
||||||
}
|
|
||||||
request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
|
|
||||||
return next.Do(ctx, request)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
45
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
generated
vendored
45
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
generated
vendored
|
|
@ -1,45 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
|
|
||||||
type PipelineOptions struct {
|
|
||||||
// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
|
|
||||||
Log pipeline.LogOptions
|
|
||||||
|
|
||||||
// Retry configures the built-in retry policy behavior.
|
|
||||||
Retry RetryOptions
|
|
||||||
|
|
||||||
// RequestLog configures the built-in request logging policy.
|
|
||||||
RequestLog RequestLogOptions
|
|
||||||
|
|
||||||
// Telemetry configures the built-in telemetry policy behavior.
|
|
||||||
Telemetry TelemetryOptions
|
|
||||||
|
|
||||||
// HTTPSender configures the sender of HTTP requests
|
|
||||||
HTTPSender pipeline.Factory
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewPipeline creates a Pipeline using the specified credentials and options.
|
|
||||||
func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
|
|
||||||
// Closest to API goes first; closest to the wire goes last
|
|
||||||
f := []pipeline.Factory{
|
|
||||||
NewTelemetryPolicyFactory(o.Telemetry),
|
|
||||||
NewUniqueRequestIDPolicyFactory(),
|
|
||||||
NewRetryPolicyFactory(o.Retry),
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
|
|
||||||
// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
|
|
||||||
// NOTE: The credential's policy factory must appear close to the wire so it can sign any
|
|
||||||
// changes made by other factories (like UniqueRequestIDPolicyFactory)
|
|
||||||
f = append(f, c)
|
|
||||||
}
|
|
||||||
f = append(f,
|
|
||||||
NewRequestLogPolicyFactory(o.RequestLog),
|
|
||||||
pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
|
|
||||||
|
|
||||||
return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
|
|
||||||
}
|
|
||||||
194
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
generated
vendored
194
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
generated
vendored
|
|
@ -1,194 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RequestLogOptions configures the retry policy's behavior.
|
|
||||||
type RequestLogOptions struct {
|
|
||||||
// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
|
|
||||||
// duration (-1=no logging; 0=default threshold).
|
|
||||||
LogWarningIfTryOverThreshold time.Duration
|
|
||||||
|
|
||||||
// SyslogDisabled is a flag to check if logging to Syslog/Windows-Event-Logger is enabled or not
|
|
||||||
// We by default print to Syslog/Windows-Event-Logger.
|
|
||||||
// If SyslogDisabled is not provided explicitly, the default value will be false.
|
|
||||||
SyslogDisabled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o RequestLogOptions) defaults() RequestLogOptions {
|
|
||||||
if o.LogWarningIfTryOverThreshold == 0 {
|
|
||||||
// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
|
|
||||||
// But this monitors the time to get the HTTP response; NOT the time to download the response body.
|
|
||||||
o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
|
|
||||||
}
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
|
|
||||||
func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
|
|
||||||
o = o.defaults() // Force defaults to be calculated
|
|
||||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
|
||||||
// These variables are per-policy; shared by multiple calls to Do
|
|
||||||
var try int32
|
|
||||||
operationStart := time.Now() // If this is the 1st try, record the operation state time
|
|
||||||
return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
|
|
||||||
try++ // The first try is #1 (not #0)
|
|
||||||
|
|
||||||
// Log the outgoing request as informational
|
|
||||||
if po.ShouldLog(pipeline.LogInfo) {
|
|
||||||
b := &bytes.Buffer{}
|
|
||||||
fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
|
|
||||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
|
|
||||||
po.Log(pipeline.LogInfo, b.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the time for this particular retry operation and then Do the operation.
|
|
||||||
tryStart := time.Now()
|
|
||||||
response, err = next.Do(ctx, request) // Make the request
|
|
||||||
tryEnd := time.Now()
|
|
||||||
tryDuration := tryEnd.Sub(tryStart)
|
|
||||||
opDuration := tryEnd.Sub(operationStart)
|
|
||||||
|
|
||||||
logLevel, forceLog := pipeline.LogInfo, false // Default logging information
|
|
||||||
|
|
||||||
// If the response took too long, we'll upgrade to warning.
|
|
||||||
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
|
|
||||||
// Log a warning if the try duration exceeded the specified threshold
|
|
||||||
logLevel, forceLog = pipeline.LogWarning, !o.SyslogDisabled
|
|
||||||
}
|
|
||||||
|
|
||||||
var sc int
|
|
||||||
if err == nil { // We got a valid response from the service
|
|
||||||
sc = response.Response().StatusCode
|
|
||||||
} else { // We got an error, so we should inspect if we got a response
|
|
||||||
if se, ok := err.(StorageError); ok {
|
|
||||||
if r := se.Response(); r != nil {
|
|
||||||
sc = r.StatusCode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if sc == 0 || ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict &&
|
|
||||||
sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
|
|
||||||
logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled // Promote to Error any 4xx (except those listed is an error) or any 5xx
|
|
||||||
} else {
|
|
||||||
// For other status codes, we leave the level as is.
|
|
||||||
}
|
|
||||||
|
|
||||||
if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog {
|
|
||||||
// We're going to log this; build the string to log
|
|
||||||
b := &bytes.Buffer{}
|
|
||||||
slow := ""
|
|
||||||
if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
|
|
||||||
slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration)
|
|
||||||
if err != nil { // This HTTP request did not get a response from the service
|
|
||||||
fmt.Fprint(b, "REQUEST ERROR\n")
|
|
||||||
} else {
|
|
||||||
if logLevel == pipeline.LogError {
|
|
||||||
fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n")
|
|
||||||
} else {
|
|
||||||
fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err)
|
|
||||||
if logLevel <= pipeline.LogError {
|
|
||||||
b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation)
|
|
||||||
}
|
|
||||||
msg := b.String()
|
|
||||||
|
|
||||||
if forceLog {
|
|
||||||
pipeline.ForceLog(logLevel, msg)
|
|
||||||
}
|
|
||||||
if shouldLog {
|
|
||||||
po.Log(logLevel, msg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return response, err
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret.
|
|
||||||
func RedactSigQueryParam(rawQuery string) (bool, string) {
|
|
||||||
rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig=
|
|
||||||
sigFound := strings.Contains(rawQuery, "?sig=")
|
|
||||||
if !sigFound {
|
|
||||||
sigFound = strings.Contains(rawQuery, "&sig=")
|
|
||||||
if !sigFound {
|
|
||||||
return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// [?|&]sig= found, redact its value
|
|
||||||
values, _ := url.ParseQuery(rawQuery)
|
|
||||||
for name := range values {
|
|
||||||
if strings.EqualFold(name, "sig") {
|
|
||||||
values[name] = []string{"REDACTED"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return sigFound, values.Encode()
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareRequestForLogging(request pipeline.Request) *http.Request {
|
|
||||||
req := request
|
|
||||||
if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound {
|
|
||||||
// Make copy so we don't destroy the query parameters we actually need to send in the request
|
|
||||||
req = request.Copy()
|
|
||||||
req.Request.URL.RawQuery = rawQuery
|
|
||||||
}
|
|
||||||
|
|
||||||
return prepareRequestForServiceLogging(req)
|
|
||||||
}
|
|
||||||
|
|
||||||
func stack() []byte {
|
|
||||||
buf := make([]byte, 1024)
|
|
||||||
for {
|
|
||||||
n := runtime.Stack(buf, false)
|
|
||||||
if n < len(buf) {
|
|
||||||
return buf[:n]
|
|
||||||
}
|
|
||||||
buf = make([]byte, 2*len(buf))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Redact phase useful for blob and file service only. For other services,
|
|
||||||
// this method can directly return request.Request.
|
|
||||||
///////////////////////////////////////////////////////////////////////////////////////
|
|
||||||
func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
|
|
||||||
req := request
|
|
||||||
if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
|
|
||||||
req = request.Copy()
|
|
||||||
url, err := url.Parse(req.Header.Get(key))
|
|
||||||
if err == nil {
|
|
||||||
if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
|
|
||||||
url.RawQuery = rawQuery
|
|
||||||
req.Header.Set(xMsCopySourceHeader, url.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return req.Request
|
|
||||||
}
|
|
||||||
|
|
||||||
// xMsCopySourceHeader is the request header carrying the copy-source URL,
// which may embed a SAS "sig" value that must be redacted before logging.
const xMsCopySourceHeader = "x-ms-copy-source"
|
|
||||||
|
|
||||||
// doesHeaderExistCaseInsensitive reports whether key is present in header
// (compared case-insensitively) and, when found, returns the header's actual
// stored key name so callers can read or rewrite the exact entry.
func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
	for candidate := range header {
		if strings.EqualFold(candidate, key) {
			return true, candidate
		}
	}
	return false, ""
}
|
|
||||||
414
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
generated
vendored
414
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
generated
vendored
|
|
@ -1,414 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/rand"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
type RetryPolicy int32

const (
	// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
	RetryPolicyExponential RetryPolicy = 0

	// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
	RetryPolicyFixed RetryPolicy = 1
)
|
|
||||||
|
|
||||||
// RetryOptions configures the retry policy's behavior.
type RetryOptions struct {
	// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
	// A value of zero means that you accept our default policy.
	Policy RetryPolicy

	// MaxTries specifies the maximum number of attempts an operation will be tried before producing an error (0=default).
	// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
	MaxTries int32

	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
	// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
	// of data, the default TryTimeout will probably not be sufficient. You should override this value
	// based on the bandwidth available to the host machine and proximity to the Storage service. A good
	// starting point may be something like (60 seconds per MB of anticipated-payload-size).
	TryTimeout time.Duration

	// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
	// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
	// with each retry up to a maximum specified by MaxRetryDelay.
	// If you specify 0, then you must also specify 0 for MaxRetryDelay.
	// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
	// equal to or greater than RetryDelay.
	RetryDelay time.Duration

	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
	// If you specify 0, then you must also specify 0 for RetryDelay.
	MaxRetryDelay time.Duration

	// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
	// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
	// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
	// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
	RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
}
|
|
||||||
|
|
||||||
// retryReadsFromSecondaryHost returns the host to use when retrying read
// operations; an empty string disables secondary retries entirely.
func (o RetryOptions) retryReadsFromSecondaryHost() string {
	return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
	//return "" // This is for non-blob SDKs
}
|
|
||||||
|
|
||||||
func (o RetryOptions) defaults() RetryOptions {
|
|
||||||
// We assume the following:
|
|
||||||
// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
|
|
||||||
// 2. o.MaxTries >= 0
|
|
||||||
// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
|
|
||||||
// 4. o.RetryDelay <= o.MaxRetryDelay
|
|
||||||
// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
|
|
||||||
|
|
||||||
IfDefault := func(current *time.Duration, desired time.Duration) {
|
|
||||||
if *current == time.Duration(0) {
|
|
||||||
*current = desired
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defaults if unspecified
|
|
||||||
if o.MaxTries == 0 {
|
|
||||||
o.MaxTries = 4
|
|
||||||
}
|
|
||||||
switch o.Policy {
|
|
||||||
case RetryPolicyExponential:
|
|
||||||
IfDefault(&o.TryTimeout, 1*time.Minute)
|
|
||||||
IfDefault(&o.RetryDelay, 4*time.Second)
|
|
||||||
IfDefault(&o.MaxRetryDelay, 120*time.Second)
|
|
||||||
|
|
||||||
case RetryPolicyFixed:
|
|
||||||
IfDefault(&o.TryTimeout, 1*time.Minute)
|
|
||||||
IfDefault(&o.RetryDelay, 30*time.Second)
|
|
||||||
IfDefault(&o.MaxRetryDelay, 120*time.Second)
|
|
||||||
}
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
|
|
||||||
pow := func(number int64, exponent int32) int64 { // pow is nested helper function
|
|
||||||
var result int64 = 1
|
|
||||||
for n := int32(0); n < exponent; n++ {
|
|
||||||
result *= number
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
delay := time.Duration(0)
|
|
||||||
switch o.Policy {
|
|
||||||
case RetryPolicyExponential:
|
|
||||||
delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
|
|
||||||
|
|
||||||
case RetryPolicyFixed:
|
|
||||||
if try > 1 { // Any try after the 1st uses the fixed delay
|
|
||||||
delay = o.RetryDelay
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
|
|
||||||
// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
|
|
||||||
delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
|
|
||||||
if delay > o.MaxRetryDelay {
|
|
||||||
delay = o.MaxRetryDelay
|
|
||||||
}
|
|
||||||
return delay
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
// The returned policy retries failed HTTP operations according to o, optionally alternating
// between the primary host and a read-only secondary host for GET/HEAD requests.
func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
	o = o.defaults() // Force defaults to be calculated
	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
			// Before each try, we'll select either the primary or secondary URL.
			primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC

			// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
			considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""

			// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
			// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
			// If using a secondary:
			//    Odd tries (1, 3, ...) go against the primary; even tries go against the secondary (see tryingPrimary below)
			//    For a primary wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.2)
			//    If secondary gets a 404, don't fail, retry but future retries are only against the primary
			//    When retrying against a secondary, ignore the retry count and wait (1 second * random(0.8, 1.3))
			for try := int32(1); try <= o.MaxTries; try++ {
				logf("\n=====> Try=%d\n", try)

				// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
				tryingPrimary := !considerSecondary || (try%2 == 1)
				// Select the correct host and delay
				if tryingPrimary {
					primaryTry++
					delay := o.calcDelay(primaryTry)
					logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
					time.Sleep(delay) // The 1st try returns 0 delay
				} else {
					// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
					delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
					logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
					time.Sleep(delay) // Delay with some jitter before trying secondary
				}

				// Clone the original request to ensure that each try starts with the original (unmutated) request.
				requestCopy := request.Copy()

				// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
				// the stream may not be at offset 0 when we first get it and we want the same behavior for the
				// 1st try as for additional tries.
				err = requestCopy.RewindBody()
				if err != nil {
					return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
				}

				// Point this attempt at the secondary host when appropriate.
				if !tryingPrimary {
					requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
					requestCopy.Host = o.retryReadsFromSecondaryHost()
				}

				// Set the server-side timeout query parameter "timeout=[seconds]"
				timeout := int32(o.TryTimeout.Seconds()) // Max seconds per try
				if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
					t := int32(deadline.Sub(time.Now()).Seconds()) // Duration from now until user's ctx reaches its deadline
					logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", timeout, t)
					if t < timeout {
						timeout = t
					}
					if timeout < 0 {
						timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
					}
					logf("TryTimeout adjusted to=%d sec\n", timeout)
				}
				q := requestCopy.Request.URL.Query()
				q.Set("timeout", strconv.Itoa(int(timeout+1))) // Add 1 to "round up"
				requestCopy.Request.URL.RawQuery = q.Encode()
				logf("Url=%s\n", requestCopy.Request.URL.String())

				// Set the time for this particular retry operation and then Do the operation.
				tryCtx, tryCancel := context.WithTimeout(ctx, time.Second*time.Duration(timeout))
				//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
				response, err = next.Do(tryCtx, requestCopy) // Make the request
				/*err = improveDeadlineExceeded(err)
				if err == nil {
					response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
				}*/
				logf("Err=%v, response=%v\n", err, response)

				// Classify the outcome: "Retry: ..." means try again, "NoRetry: ..." means stop.
				action := "" // This MUST get changed within the switch code below
				switch {
				case ctx.Err() != nil:
					action = "NoRetry: Op timeout"
				case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
					// If attempt was against the secondary & it returned a StatusNotFound (404), then
					// the resource was not found. This may be due to replication delay. So, in this
					// case, we'll never try the secondary again for this operation.
					considerSecondary = false
					action = "Retry: Secondary URL returned 404"
				case err != nil:
					// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
					// Use ServiceCode to verify if the error is related to storage service-side,
					// ServiceCode is set only when error related to storage service happened.
					if stErr, ok := err.(StorageError); ok {
						if stErr.Temporary() {
							action = "Retry: StorageError with error service code and Temporary()"
						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporarily work around, remove this after protocol layer fix the issue that net.Error is wrapped as storageError
							action = "Retry: StorageError with success status code"
						} else {
							action = "NoRetry: StorageError not Temporary() and without retriable status code"
						}
					} else if netErr, ok := err.(net.Error); ok {
						// Use non-retriable net.Error list, but not retriable list.
						// As there are errors without Temporary() implementation,
						// while need be retried, like 'connection reset by peer', 'transport connection broken' and etc.
						// So the SDK do retry for most of the case, unless the error should not be retried for sure.
						if !isNotRetriable(netErr) {
							action = "Retry: net.Error and not in the non-retriable list"
						} else {
							action = "NoRetry: net.Error and in the non-retriable list"
						}
					} else if err == io.ErrUnexpectedEOF {
						action = "Retry: unexpected EOF"
					} else {
						action = "NoRetry: unrecognized error"
					}
				default:
					action = "NoRetry: successful HTTP request" // no error
				}

				logf("Action=%s\n", action)
				// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
				if action[0] != 'R' { // Retry only if action starts with 'R'
					if err != nil {
						tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
					} else {
						// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
						// So, when the user closes the Body, our per-try context gets closed too.
						// Another option, is that the Last Policy do this wrapping for a per-retry context (not for the user's context)
						if response == nil || response.Response() == nil {
							// We do panic in the case response or response.Response() is nil,
							// as for client, the response should not be nil if request is sent and the operations is executed successfully.
							// Another option, is that execute the cancel function when response or response.Response() is nil,
							// as in this case, current per-try has nothing to do in future.
							return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
						}
						response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
					}
					break // Don't retry
				}
				if response != nil && response.Response() != nil && response.Response().Body != nil {
					// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
					body := response.Response().Body
					io.Copy(ioutil.Discard, body)
					body.Close()
				}
				// If retrying, cancel the current per-try timeout context
				tryCancel()
			}
			return response, err // Not retryable or too many retries; return the last response/error
		}
	})
}
|
|
||||||
|
|
||||||
// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
|
|
||||||
type contextCancelReadCloser struct {
|
|
||||||
cf context.CancelFunc
|
|
||||||
body io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
|
|
||||||
return rc.body.Read(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rc *contextCancelReadCloser) Close() error {
|
|
||||||
err := rc.body.Close()
|
|
||||||
if rc.cf != nil {
|
|
||||||
rc.cf()
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// isNotRetriable checks if the provided net.Error isn't retriable.
|
|
||||||
func isNotRetriable(errToParse net.Error) bool {
|
|
||||||
// No error, so this is NOT retriable.
|
|
||||||
if errToParse == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// The error is either temporary or a timeout so it IS retriable (not not retriable).
|
|
||||||
if errToParse.Temporary() || errToParse.Timeout() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
genericErr := error(errToParse)
|
|
||||||
|
|
||||||
// From here all the error are neither Temporary() nor Timeout().
|
|
||||||
switch err := errToParse.(type) {
|
|
||||||
case *net.OpError:
|
|
||||||
// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
|
|
||||||
if err.Err == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
genericErr = err.Err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch genericErr.(type) {
|
|
||||||
case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
|
|
||||||
// If the error is one of the ones listed, then it is NOT retriable.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// If it's invalid header field name/value error thrown by http module, then it is NOT retriable.
|
|
||||||
// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
|
|
||||||
if strings.Contains(genericErr.Error(), "invalid header field") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Assume the error is retriable.
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent}
|
|
||||||
|
|
||||||
func isSuccessStatusCode(resp *http.Response) bool {
|
|
||||||
if resp == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, i := range successStatusCodes {
|
|
||||||
if i == resp.StatusCode {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// logf is the retry policy's debug logger; the default is a no-op.
// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away
var logf = func(format string, a ...interface{}) {}

// Use this version to see the retry method's code path (import "fmt")
//var logf = fmt.Printf
|
|
||||||
|
|
||||||
/*
|
|
||||||
type deadlineExceededReadCloser struct {
|
|
||||||
r io.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) {
|
|
||||||
n, err := 0, io.EOF
|
|
||||||
if r.r != nil {
|
|
||||||
n, err = r.r.Read(p)
|
|
||||||
}
|
|
||||||
return n, improveDeadlineExceeded(err)
|
|
||||||
}
|
|
||||||
func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
// For an HTTP request, the ReadCloser MUST also implement seek
|
|
||||||
// For an HTTP response, Seek MUST not be called (or this will panic)
|
|
||||||
o, err := r.r.(io.Seeker).Seek(offset, whence)
|
|
||||||
return o, improveDeadlineExceeded(err)
|
|
||||||
}
|
|
||||||
func (r *deadlineExceededReadCloser) Close() error {
|
|
||||||
if c, ok := r.r.(io.Closer); ok {
|
|
||||||
c.Close()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// timeoutError is the internal struct that implements our richer timeout error.
|
|
||||||
type deadlineExceeded struct {
|
|
||||||
responseError
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time
|
|
||||||
|
|
||||||
// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error.
|
|
||||||
func improveDeadlineExceeded(cause error) error {
|
|
||||||
// If cause is not DeadlineExceeded, return the same error passed in.
|
|
||||||
if cause != context.DeadlineExceeded {
|
|
||||||
return cause
|
|
||||||
}
|
|
||||||
// Else, convert DeadlineExceeded to our timeoutError which gives a richer string message
|
|
||||||
return &deadlineExceeded{
|
|
||||||
responseError: responseError{
|
|
||||||
ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error implements the error interface's Error method to return a string representation of the error.
|
|
||||||
func (e *deadlineExceeded) Error() string {
|
|
||||||
return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field")
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
51
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go
generated
vendored
51
vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go
generated
vendored
|
|
@ -1,51 +0,0 @@
|
||||||
package azblob
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/Azure/azure-pipeline-go/pipeline"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TelemetryOptions configures the telemetry policy's behavior.
type TelemetryOptions struct {
	// Value is a string prepended to each request's User-Agent and sent to the service.
	// The service records the user-agent in logs for diagnostics and tracking of client requests.
	Value string
}
|
|
||||||
|
|
||||||
// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects
|
|
||||||
// which add telemetry information to outgoing HTTP requests.
|
|
||||||
func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
|
|
||||||
b := &bytes.Buffer{}
|
|
||||||
b.WriteString(o.Value)
|
|
||||||
if b.Len() > 0 {
|
|
||||||
b.WriteRune(' ')
|
|
||||||
}
|
|
||||||
fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
|
|
||||||
telemetryValue := b.String()
|
|
||||||
|
|
||||||
return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
|
|
||||||
return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
|
|
||||||
request.Header.Set("User-Agent", telemetryValue)
|
|
||||||
return next.Do(ctx, request)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NOTE: the ONLY function that should write to this variable is this func
// platformInfo is "(<go runtime version>; <OS>)", matching the SDK convention:
//   Azure-Storage/version (runtime; os type and version)
//   Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
var platformInfo = func() string {
	osInfo := runtime.GOOS // Default OS string
	if osInfo == "windows" {
		osInfo = os.Getenv("OS") // Get more specific OS information
	}
	// linux, freebsd, and every other GOOS keep the default string.
	return fmt.Sprintf("(%s; %s)", runtime.Version(), osInfo)
}()
|
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue