From ef5ae67e6f833256b60170deed2b1b53722357bc Mon Sep 17 00:00:00 2001
From: Marcell Mars
Date: Sun, 18 Apr 2021 12:25:49 +0200
Subject: [PATCH] latest hugo, added hugo version to logs...

---
 go.mod | 40 +-
 go.sum | 324 +-
 main.go | 2 +-
 metahugo.go | 29 +-
 vendor/cloud.google.com/go/CHANGES.md | 188 +
 vendor/cloud.google.com/go/README.md | 6 +-
 vendor/cloud.google.com/go/doc.go | 14 +
 vendor/cloud.google.com/go/go.mod | 25 +-
 vendor/cloud.google.com/go/go.sum | 232 +-
 .../go/iam/credentials/apiv1/doc.go | 4 +-
 .../iam/credentials/apiv1/gapic_metadata.json | 38 +
 .../apiv1/iam_credentials_client.go | 2 +-
 .../go/internal/.repo-metadata-full.json | 244 +-
 vendor/cloud.google.com/go/storage/CHANGES.md | 28 +
 vendor/cloud.google.com/go/storage/README.md | 6 +-
 vendor/cloud.google.com/go/storage/bucket.go | 84 +-
 vendor/cloud.google.com/go/storage/doc.go | 15 +-
 vendor/cloud.google.com/go/storage/go.mod | 21 +-
 vendor/cloud.google.com/go/storage/go.sum | 233 +-
 .../go/storage/post_policy_v4.go | 16 +-
 vendor/cloud.google.com/go/storage/storage.go | 125 +-
 .../Azure/azure-pipeline-go/pipeline/error.go | 3 +
 .../azblob/atomicmorph.go | 69 -
 .../azblob/bytes_writer.go | 24 +
 .../azblob/chunkwriting.go | 219 +
 .../azblob/common_utils.go | 1 +
 .../azure-storage-blob-go/azblob/highlevel.go | 459 +-
 .../azblob/parsing_urls.go | 21 +-
 .../azblob/request_common.go | 33 +
 .../azblob/sas_service.go | 36 +-
 .../azblob/section_writer.go | 47 +
 .../azblob/service_codes_blob.go | 7 +-
 .../azblob/url_append_blob.go | 42 +-
 .../azure-storage-blob-go/azblob/url_blob.go | 171 +-
 .../azblob/url_block_blob.go | 129 +-
 .../azblob/url_container.go | 12 +-
 .../azblob/url_page_blob.go | 80 +-
 .../azblob/url_service.go | 31 +-
 .../azure-storage-blob-go/azblob/version.go | 2 +-
 .../azblob/zc_mmf_unix.go | 27 -
 .../azblob/zc_mmf_windows.go | 38 -
 .../azblob/zc_pipeline.go | 1 -
 .../azblob/zc_policy_request_log.go | 22 +-
 .../azblob/zc_policy_retry.go | 2 +
 .../azblob/zc_policy_unique_request_id.go | 19 +-
 .../azblob/zc_retry_reader.go | 12 +-
 .../azblob/zc_sas_account.go | 19 +-
 .../azblob/zc_sas_query_params.go | 50 +-
 .../azblob/zc_service_codes_common.go | 3 +
 .../azblob/zc_storage_error.go | 2 +-
 .../azblob/zz_generated_append_blob.go | 260 +-
 .../azblob/zz_generated_blob.go | 1052 +-
 .../azblob/zz_generated_block_blob.go | 243 +-
 .../azblob/zz_generated_client.go | 2 +-
 .../azblob/zz_generated_container.go | 80 +-
 .../azblob/zz_generated_models.go | 2405 +++-
 .../azblob/zz_generated_page_blob.go | 294 +-
 .../azblob/zz_generated_service.go | 161 +-
 .../azblob/zz_generated_version.go | 2 +-
 .../azblob/zz_response_helpers.go | 10 +-
 .../github.com/Azure/go-autorest/.gitignore | 32 +
 .../github.com/Azure/go-autorest/CHANGELOG.md | 1004 ++
 .../github.com/Azure/go-autorest/GNUmakefile | 23 +
 .../github.com/Azure/go-autorest/Gopkg.lock | 324 +
 .../github.com/Azure/go-autorest/Gopkg.toml | 59 +
 vendor/github.com/Azure/go-autorest/LICENSE | 191 +
 vendor/github.com/Azure/go-autorest/README.md | 165 +
 .../Azure/go-autorest/autorest/LICENSE | 191 +
 .../Azure/go-autorest/autorest/adal/LICENSE | 191 +
 .../Azure/go-autorest/autorest/adal/README.md | 292 +
 .../Azure/go-autorest/autorest/adal/config.go | 151 +
 .../go-autorest/autorest/adal/devicetoken.go | 273 +
 .../Azure/go-autorest/autorest/adal/go.mod | 13 +
 .../Azure/go-autorest/autorest/adal/go.sum | 21 +
 .../autorest/adal/go_mod_tidy_hack.go | 24 +
 .../go-autorest/autorest/adal/persist.go | 135 +
 .../Azure/go-autorest/autorest/adal/sender.go | 96 +
 .../Azure/go-autorest/autorest/adal/token.go | 1336 +++
 .../go-autorest/autorest/adal/token_1.13.go | 75 +
 .../go-autorest/autorest/adal/token_legacy.go | 74 +
 .../go-autorest/autorest/adal/version.go | 45 +
 .../go-autorest/autorest/authorization.go | 353 +
 .../go-autorest/autorest/authorization_sas.go | 66 +
 .../autorest/authorization_storage.go | 307 +
 .../Azure/go-autorest/autorest/autorest.go | 150 +
 .../Azure/go-autorest/autorest/azure/async.go | 991 ++
 .../Azure/go-autorest/autorest/azure/azure.go | 388 +
 .../autorest/azure/environments.go | 269 +
 .../autorest/azure/metadata_environment.go | 245 +
 .../Azure/go-autorest/autorest/azure/rp.go | 204 +
 .../Azure/go-autorest/autorest/client.go | 328 +
 .../Azure/go-autorest/autorest/date/LICENSE | 191 +
 .../Azure/go-autorest/autorest/date/date.go | 96 +
 .../Azure/go-autorest/autorest/date/go.mod | 5 +
 .../Azure/go-autorest/autorest/date/go.sum | 2 +
 .../autorest/date/go_mod_tidy_hack.go | 24 +
 .../Azure/go-autorest/autorest/date/time.go | 103 +
 .../go-autorest/autorest/date/timerfc1123.go | 100 +
 .../go-autorest/autorest/date/unixtime.go | 123 +
 .../go-autorest/autorest/date/utility.go | 25 +
 .../Azure/go-autorest/autorest/error.go | 103 +
 .../Azure/go-autorest/autorest/go.mod | 12 +
 .../Azure/go-autorest/autorest/go.sum | 23 +
 .../go-autorest/autorest/go_mod_tidy_hack.go | 24 +
 .../Azure/go-autorest/autorest/preparer.go | 547 +
 .../Azure/go-autorest/autorest/responder.go | 269 +
 .../go-autorest/autorest/retriablerequest.go | 52 +
 .../autorest/retriablerequest_1.7.go | 54 +
 .../autorest/retriablerequest_1.8.go | 66 +
 .../Azure/go-autorest/autorest/sender.go | 447 +
 .../Azure/go-autorest/autorest/utility.go | 232 +
 .../go-autorest/autorest/utility_1.13.go | 29 +
 .../go-autorest/autorest/utility_legacy.go | 31 +
 .../Azure/go-autorest/autorest/version.go | 41 +
 .../Azure/go-autorest/azure-pipelines.yml | 105 +
 vendor/github.com/Azure/go-autorest/doc.go | 18 +
 .../Azure/go-autorest/logger/LICENSE | 191 +
 .../Azure/go-autorest/logger/go.mod | 5 +
 .../Azure/go-autorest/logger/go.sum | 2 +
 .../go-autorest/logger/go_mod_tidy_hack.go | 24 +
 .../Azure/go-autorest/logger/logger.go | 337 +
 .../Azure/go-autorest/tracing/LICENSE | 191 +
 .../Azure/go-autorest/tracing/go.mod | 5 +
 .../Azure/go-autorest/tracing/go.sum | 2 +
 .../go-autorest/tracing/go_mod_tidy_hack.go | 24 +
 .../Azure/go-autorest/tracing/tracing.go | 67 +
 .../aws/aws-sdk-go/aws/client/client.go | 4 -
 .../aws/aws-sdk-go/aws/client/logger.go | 8 +
 .../stscreds/assume_role_provider.go | 2 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 474 +-
 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 5 +-
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws-sdk-go/internal/s3shared/arn/arn.go | 18 +-
 .../s3shared/arn/s3_object_lambda_arn.go | 15 +
 .../aws/aws-sdk-go/service/s3/api.go | 2121 ++--
 .../aws-sdk-go/service/s3/customizations.go | 2 +
 .../aws/aws-sdk-go/service/s3/endpoint.go | 102 +-
 .../aws-sdk-go/service/s3/endpoint_builder.go | 98 +-
 .../aws/aws-sdk-go/service/s3/errors.go | 6 +-
 .../service/s3/s3iface/interface.go | 4 +
 .../aws-sdk-go/service/s3/s3manager/arn.go | 23 +
 .../service/s3/s3manager/download.go | 4 +
 .../aws-sdk-go/service/s3/s3manager/upload.go | 4 +
 .../service/s3/s3manager/upload_input.go | 30 +-
 .../aws/aws-sdk-go/service/sts/api.go | 404 +-
 .../evanw/esbuild/internal/ast/ast.go | 56 +-
 .../evanw/esbuild/internal/bundler/bundler.go | 1283 ++-
 .../evanw/esbuild/internal/bundler/debug.go | 132 +
 .../evanw/esbuild/internal/bundler/linker.go | 5055 +++++----
 .../evanw/esbuild/internal/cache/cache_fs.go | 10 +-
 .../esbuild/internal/compat/css_table.go | 4 +
 .../evanw/esbuild/internal/compat/js_table.go | 136 +-
 .../evanw/esbuild/internal/config/config.go | 149 +-
 .../evanw/esbuild/internal/config/globals.go | 711 +-
 .../evanw/esbuild/internal/css_ast/css_ast.go | 7 +-
 .../esbuild/internal/css_parser/css_parser.go | 184 +-
 .../internal/css_printer/css_printer.go | 124 +-
 .../evanw/esbuild/internal/fs/fs.go | 50 +-
 .../evanw/esbuild/internal/fs/fs_mock.go | 32 +-
 .../evanw/esbuild/internal/fs/fs_real.go | 127 +-
 .../evanw/esbuild/internal/graph/graph.go | 339 +
 .../evanw/esbuild/internal/graph/input.go | 102 +
 .../evanw/esbuild/internal/graph/meta.go | 201 +
 .../evanw/esbuild/internal/helpers/bitset.go | 27 +
 .../evanw/esbuild/internal/helpers/joiner.go | 67 +
 .../evanw/esbuild/internal/helpers/mime.go | 31 +
 .../evanw/esbuild/internal/js_ast/js_ast.go | 285 +-
 .../esbuild/internal/js_lexer/js_lexer.go | 56 +-
 .../esbuild/internal/js_parser/js_parser.go | 1822 +--
 .../internal/js_parser/js_parser_lower.go | 574 +-
 .../esbuild/internal/js_parser/ts_parser.go | 360 +-
 .../esbuild/internal/js_printer/js_printer.go | 400 +-
 .../evanw/esbuild/internal/logger/logger.go | 472 +-
 .../esbuild/internal/logger/logger_darwin.go | 1 +
 .../esbuild/internal/logger/logger_linux.go | 1 +
 .../esbuild/internal/logger/logger_windows.go | 45 +-
 .../evanw/esbuild/internal/renamer/renamer.go | 73 +-
 .../esbuild/internal/resolver/dataurl.go | 76 +
 .../esbuild/internal/resolver/package_json.go | 1000 ++
 .../esbuild/internal/resolver/resolver.go | 1298 ++-
 .../internal/resolver/tsconfig_json.go | 12 +-
 .../evanw/esbuild/internal/runtime/runtime.go | 139 +-
 .../esbuild/internal/sourcemap/sourcemap.go | 181 +
 .../evanw/esbuild/internal/xxhash/LICENSE.txt | 22 +
 .../evanw/esbuild/internal/xxhash/README.md | 1 +
 .../evanw/esbuild/internal/xxhash/xxhash.go | 235 +
 .../esbuild/internal/xxhash/xxhash_other.go | 74 +
 .../github.com/evanw/esbuild/pkg/api/api.go | 102 +-
 .../evanw/esbuild/pkg/api/api_impl.go | 961 +-
 .../evanw/esbuild/pkg/api/serve_other.go | 561 +
 .../evanw/esbuild/pkg/api/serve_wasm.go | 11 +
 .../form3tech-oss/jwt-go/.gitignore | 5 +
 .../form3tech-oss/jwt-go/.travis.yml | 12 +
 .../github.com/form3tech-oss/jwt-go/LICENSE | 8 +
 .../form3tech-oss/jwt-go/MIGRATION_GUIDE.md | 97 +
 .../github.com/form3tech-oss/jwt-go/README.md | 104 +
 .../form3tech-oss/jwt-go/VERSION_HISTORY.md | 118 +
 .../github.com/form3tech-oss/jwt-go/claims.go | 136 +
 vendor/github.com/form3tech-oss/jwt-go/doc.go | 4 +
 .../github.com/form3tech-oss/jwt-go/ecdsa.go | 148 +
 .../form3tech-oss/jwt-go/ecdsa_utils.go | 69 +
 .../github.com/form3tech-oss/jwt-go/errors.go | 59 +
 .../github.com/form3tech-oss/jwt-go/hmac.go | 95 +
 .../form3tech-oss/jwt-go/map_claims.go | 102 +
 .../github.com/form3tech-oss/jwt-go/none.go | 52 +
 .../github.com/form3tech-oss/jwt-go/parser.go | 148 +
 vendor/github.com/form3tech-oss/jwt-go/rsa.go | 101 +
 .../form3tech-oss/jwt-go/rsa_pss.go | 142 +
 .../form3tech-oss/jwt-go/rsa_utils.go | 101 +
 .../form3tech-oss/jwt-go/signing_method.go | 35 +
 .../github.com/form3tech-oss/jwt-go/token.go | 108 +
 .../getkin/kin-openapi/jsoninfo/field_info.go | 14 +-
 .../getkin/kin-openapi/jsoninfo/marshal.go | 6 +-
 .../getkin/kin-openapi/jsoninfo/unmarshal.go | 12 +-
 .../jsoninfo/unsupported_properties_error.go | 11 +-
 .../getkin/kin-openapi/openapi3/encoding.go | 2 +-
 .../getkin/kin-openapi/openapi3/info.go | 6 +-
 .../getkin/kin-openapi/openapi3/link.go | 2 +-
 .../getkin/kin-openapi/openapi3/operation.go | 2 +-
 .../getkin/kin-openapi/openapi3/parameter.go | 2 +-
 .../getkin/kin-openapi/openapi3/path_item.go | 4 +-
 .../getkin/kin-openapi/openapi3/schema.go | 63 +-
 .../kin-openapi/openapi3/schema_formats.go | 2 +-
 .../kin-openapi/openapi3/security_scheme.go | 30 +-
 .../getkin/kin-openapi/openapi3/server.go | 58 +-
 .../getkin/kin-openapi/openapi3/swagger.go | 6 +-
 .../kin-openapi/openapi3/swagger_loader.go | 408 +-
 .../getkin/kin-openapi/openapi3/tag.go | 11 +
 .../github.com/go-openapi/swag/.golangci.yml | 19 +
 vendor/github.com/go-openapi/swag/.travis.yml | 30 +-
 vendor/github.com/go-openapi/swag/README.md | 1 -
 vendor/github.com/go-openapi/swag/convert.go | 16 +-
 .../go-openapi/swag/convert_types.go | 195 +-
 vendor/github.com/go-openapi/swag/go.mod | 14 +-
 vendor/github.com/go-openapi/swag/go.sum | 29 +-
 vendor/github.com/go-openapi/swag/json.go | 8 +-
 vendor/github.com/go-openapi/swag/loading.go | 42 +-
 vendor/github.com/go-openapi/swag/util.go | 6 +-
 .../gohugoio/hugo/commands/genautocomplete.go | 8 +-
 .../gohugoio/hugo/commands/new_theme.go | 2 +-
 .../hugo/common/hugo/version_current.go | 2 +-
 .../github.com/gohugoio/hugo/deploy/deploy.go | 2 +-
 .../gohugoio/hugo/hugolib/config.go | 27 +-
 .../github.com/gohugoio/hugo/hugolib/page.go | 10 +-
 .../gohugoio/hugo/hugolib/page__output.go | 33 +-
 .../gohugoio/hugo/hugolib/page__per_output.go | 2 +-
 .../github.com/gohugoio/hugo/hugolib/site.go | 2 +-
 .../asciidocext/asciidocext_config/config.go | 12 -
 .../hugo/markup/asciidocext/convert.go | 6 +-
 .../hugo/markup/converter/converter.go | 2 +-
 .../hugo/markup/converter/hooks/hooks.go | 61 +-
 .../gohugoio/hugo/markup/goldmark/convert.go | 24 +-
 .../extensions/attributes/attributes.go | 12 +-
 .../hugo/markup/goldmark/render_hooks.go | 86 +-
 .../gohugoio/hugo/media/mediaType.go | 196 +-
 .../gohugoio/hugo/output/docshelper.go | 2 +-
 .../github.com/gohugoio/hugo/output/layout.go | 2 +-
 .../gohugoio/hugo/output/outputFormat.go | 37 +-
 .../hugo/parser/metadecoders/format.go | 2 +-
 .../gohugoio/hugo/releaser/releaser.go | 2 +-
 .../hugo/resources/images/exif/exif.go | 15 +-
 .../gohugoio/hugo/resources/images/image.go | 2 +-
 .../hugo/resources/page/page_paths.go | 11 +-
 .../gohugoio/hugo/resources/resource_spec.go | 4 +-
 .../resource_transformers/js/options.go | 2 -
 .../gohugoio/hugo/tpl/strings/init.go | 16 +
 .../tpl/tplimpl/embedded/templates.autogen.go | 18 +-
 .../gohugoio/hugo/tpl/tplimpl/shortcodes.go | 2 +-
 .../golang/protobuf/proto/registry.go | 10 +-
 .../github.com/golang/protobuf/ptypes/any.go | 14 +
 .../github.com/golang/protobuf/ptypes/doc.go | 4 +
 .../golang/protobuf/ptypes/duration.go | 4 +
 .../golang/protobuf/ptypes/timestamp.go | 9 +
 .../google/go-cmp/cmp/report_compare.go | 4 +-
 .../google/go-cmp/cmp/report_slices.go | 25 +-
 vendor/github.com/google/uuid/hash.go | 4 +-
 vendor/github.com/google/uuid/sql.go | 2 +-
 vendor/github.com/google/uuid/uuid.go | 10 +-
 vendor/github.com/google/uuid/version4.go | 8 +
 vendor/github.com/google/wire/README.md | 2 +-
 vendor/github.com/google/wire/go.sum | 3 +
 .../github.com/googleapis/gax-go/.gitignore | 1 -
 .../github.com/googleapis/gax-go/.travis.yml | 12 -
 .../googleapis/gax-go/CODE_OF_CONDUCT.md | 43 -
 .../googleapis/gax-go/CONTRIBUTING.md | 27 -
 vendor/github.com/googleapis/gax-go/LICENSE | 27 -
 vendor/github.com/googleapis/gax-go/README.md | 29 -
 .../googleapis/gax-go/call_option.go | 157 -
 vendor/github.com/googleapis/gax-go/gax.go | 38 -
 vendor/github.com/googleapis/gax-go/header.go | 24 -
 vendor/github.com/googleapis/gax-go/invoke.go | 89 -
 vendor/github.com/josharian/intern/README.md | 5 +
 vendor/github.com/josharian/intern/go.mod | 3 +
 vendor/github.com/josharian/intern/intern.go | 44 +
 vendor/github.com/josharian/intern/license.md | 21 +
 vendor/github.com/kyokomi/emoji/v2/README.md | 8 +-
 .../magiconair/properties/.travis.yml | 5 +
 .../magiconair/properties/CHANGELOG.md | 23 +-
 .../properties/{LICENSE => LICENSE.md} | 9 +-
 .../magiconair/properties/README.md | 1 -
 .../github.com/magiconair/properties/go.mod | 2 +
 .../github.com/magiconair/properties/load.go | 5 +-
 .../magiconair/properties/properties.go | 31 +-
 .../github.com/mailru/easyjson/buffer/pool.go | 72 +-
 .../mailru/easyjson/jlexer/lexer.go | 230 +-
 .../mailru/easyjson/jwriter/writer.go | 41 +-
 vendor/github.com/mattn/go-runewidth/go.mod | 2 +
 vendor/github.com/mattn/go-runewidth/go.sum | 2 +
 .../mattn/go-runewidth/runewidth.go | 122 +-
 .../mattn/go-runewidth/runewidth_table.go | 6 +-
 .../mitchellh/hashstructure/README.md | 8 +-
 .../github.com/mitchellh/hashstructure/go.mod | 2 +
 .../mitchellh/hashstructure/hashstructure.go | 82 +-
 .../mitchellh/hashstructure/include.go | 7 +
 .../mitchellh/mapstructure/.travis.yml | 9 -
 .../mitchellh/mapstructure/CHANGELOG.md | 12 +
 .../mitchellh/mapstructure/decode_hooks.go | 71 +-
 .../mitchellh/mapstructure/mapstructure.go | 125 +-
 .../niklasfasching/go-org/org/block.go | 15 +-
 .../niklasfasching/go-org/org/html_writer.go | 16 +-
 .../pelletier/go-toml/CONTRIBUTING.md | 4 +-
 vendor/github.com/pelletier/go-toml/LICENSE | 2 +-
 vendor/github.com/pelletier/go-toml/README.md | 10 +-
 .../pelletier/go-toml/azure-pipelines.yml | 74 +-
 vendor/github.com/pelletier/go-toml/fuzzit.sh | 26 -
 vendor/github.com/pelletier/go-toml/go.mod | 2 -
 vendor/github.com/pelletier/go-toml/go.sum | 19 -
 vendor/github.com/pelletier/go-toml/lexer.go | 306 +-
 .../github.com/pelletier/go-toml/marshal.go | 43 +-
 vendor/github.com/pelletier/go-toml/parser.go | 93 +-
 vendor/github.com/pelletier/go-toml/token.go | 6 +-
 vendor/github.com/pelletier/go-toml/toml.go | 4 +
 .../github.com/pelletier/go-toml/tomlpub.go | 71 +
 .../pelletier/go-toml/tomltree_write.go | 36 +-
 .../pelletier/go-toml/tomltree_writepub.go | 6 +
 .../LICENSE => rivo/uniseg/LICENSE.txt} | 2 +-
 vendor/github.com/rivo/uniseg/README.md | 62 +
 vendor/github.com/rivo/uniseg/doc.go | 8 +
 vendor/github.com/rivo/uniseg/go.mod | 3 +
 vendor/github.com/rivo/uniseg/grapheme.go | 268 +
 vendor/github.com/rivo/uniseg/properties.go | 1658 +++
 .../github.com/russross/blackfriday/README.md | 81 +-
 vendor/github.com/russross/blackfriday/go.mod | 2 +
 .../russross/blackfriday/v2/README.md | 90 +-
 .../russross/blackfriday/v2/block.go | 30 +-
 .../github.com/russross/blackfriday/v2/doc.go | 28 +
 .../russross/blackfriday/v2/entities.go | 2236 ++++
 .../github.com/russross/blackfriday/v2/esc.go | 42 +-
 .../russross/blackfriday/v2/html.go | 9 +-
 .../russross/blackfriday/v2/inline.go | 2 +-
 .../russross/blackfriday/v2/node.go | 12 +-
 .../sanitized_anchor_name/.travis.yml | 16 -
 .../shurcooL/sanitized_anchor_name/README.md | 36 -
 .../shurcooL/sanitized_anchor_name/go.mod | 1 -
 .../shurcooL/sanitized_anchor_name/main.go | 29 -
 vendor/github.com/spf13/afero/.travis.yml | 1 +
 vendor/github.com/spf13/afero/README.md | 2 +-
 vendor/github.com/spf13/afero/iofs.go | 288 +
 vendor/github.com/spf13/afero/mem/file.go | 2 +-
 vendor/github.com/spf13/cobra/.golangci.yml | 48 +
 vendor/github.com/spf13/cobra/.travis.yml | 9 +-
 vendor/github.com/spf13/cobra/CHANGELOG.md | 35 +-
 vendor/github.com/spf13/cobra/CONDUCT.md | 37 +
 vendor/github.com/spf13/cobra/Makefile | 18 +-
 vendor/github.com/spf13/cobra/README.md | 32 +-
 .../spf13/cobra/bash_completions.go | 133 +-
 .../spf13/cobra/bash_completions.md | 2 +-
 vendor/github.com/spf13/cobra/cobra.go | 15 +
 vendor/github.com/spf13/cobra/command.go | 116 +-
 .../spf13/cobra/custom_completions.go | 4 +-
 vendor/github.com/spf13/cobra/doc/man_docs.go | 28 +-
 .../spf13/cobra/fish_completions.go | 6 +-
 vendor/github.com/spf13/cobra/go.mod | 2 +-
 vendor/github.com/spf13/cobra/go.sum | 4 +-
 .../spf13/cobra/powershell_completions.go | 319 +-
 .../spf13/cobra/powershell_completions.md | 15 +-
 .../spf13/cobra/projects_using_cobra.md | 3 +
 .../spf13/cobra/shell_completions.md | 119 +-
 .../github.com/spf13/cobra/zsh_completions.go | 4 +-
 .../github.com/tdewolff/minify/v2/.travis.yml | 2 +-
 .../github.com/tdewolff/minify/v2/Dockerfile | 4 +-
 .../github.com/tdewolff/minify/v2/README.md | 5 +
 .../github.com/tdewolff/minify/v2/common.go | 12 +
 .../github.com/tdewolff/minify/v2/css/css.go | 122 +-
 .../github.com/tdewolff/minify/v2/css/hash.go | 430 +-
 vendor/github.com/tdewolff/minify/v2/go.mod | 2 +-
 vendor/github.com/tdewolff/minify/v2/go.sum | 4 +-
 .../tdewolff/minify/v2/html/buffer.go | 9 +-
 .../tdewolff/minify/v2/html/html.go | 25 +-
 vendor/github.com/tdewolff/minify/v2/js/js.go | 28 +-
 .../github.com/tdewolff/minify/v2/js/util.go | 5 -
 .../tdewolff/minify/v2/svg/buffer.go | 8 +-
 .../tdewolff/minify/v2/svg/pathdata.go | 6 +-
 .../github.com/tdewolff/minify/v2/svg/svg.go | 11 +-
 .../tdewolff/minify/v2/xml/buffer.go | 4 +-
 .../github.com/tdewolff/parse/v2/css/lex.go | 64 +-
 vendor/github.com/tdewolff/parse/v2/js/ast.go | 24 +-
 vendor/github.com/tdewolff/parse/v2/js/lex.go | 45 +-
 .../github.com/tdewolff/parse/v2/js/parse.go | 70 +-
 .../tdewolff/parse/v2/js/tokentype.go | 93 +-
 vendor/github.com/yuin/goldmark/README.md | 2 +-
 .../goldmark/extension/definition_list.go | 2 +-
 .../yuin/goldmark/extension/footnote.go | 2 +-
 .../yuin/goldmark/extension/table.go | 41 +-
 .../yuin/goldmark/parser/code_block.go | 6 +-
 .../yuin/goldmark/parser/fcode_block.go | 3 +-
 .../github.com/yuin/goldmark/parser/list.go | 2 +
 .../yuin/goldmark/parser/list_item.go | 2 +
 .../yuin/goldmark/renderer/html/html.go | 1 +
 vendor/go.opencensus.io/.travis.yml | 17 -
 vendor/go.opencensus.io/go.mod | 15 +-
 vendor/go.opencensus.io/go.sum | 82 +-
 vendor/go.opencensus.io/trace/basetypes.go | 10 +
 vendor/go.opencensus.io/trace/spanstore.go | 14 +-
 vendor/go.opencensus.io/trace/trace.go | 144 +-
 vendor/go.opencensus.io/trace/trace_api.go | 265 +
 vendor/gocloud.dev/AUTHORS | 1 +
 vendor/gocloud.dev/CONTRIBUTORS | 1 +
 .../gocloud.dev/blob/azureblob/azureblob.go | 248 +-
 vendor/gocloud.dev/blob/blob.go | 129 +-
 vendor/gocloud.dev/blob/driver/driver.go | 11 +
 vendor/gocloud.dev/blob/fileblob/fileblob.go | 123 +-
 vendor/gocloud.dev/blob/gcsblob/gcsblob.go | 28 +-
 vendor/gocloud.dev/blob/s3blob/s3blob.go | 83 +-
 vendor/gocloud.dev/internal/retry/retry.go | 2 +-
 .../x/crypto/chacha20/chacha_arm64.go | 3 +-
 .../x/crypto/chacha20/chacha_arm64.s | 2 +-
 .../x/crypto/chacha20/chacha_noasm.go | 3 +-
 .../x/crypto/chacha20/chacha_ppc64le.go | 3 +-
 .../x/crypto/chacha20/chacha_ppc64le.s | 2 +-
 .../x/crypto/chacha20/chacha_s390x.go | 3 +-
 .../x/crypto/chacha20/chacha_s390x.s | 2 +-
 .../x/crypto/curve25519/curve25519_amd64.go | 3 +-
 .../x/crypto/curve25519/curve25519_amd64.s | 2 +-
 .../x/crypto/curve25519/curve25519_noasm.go | 3 +-
 vendor/golang.org/x/crypto/ed25519/ed25519.go | 1 +
 .../x/crypto/ed25519/ed25519_go113.go | 1 +
 .../x/crypto/internal/subtle/aliasing.go | 3 +-
 ...iasing_appengine.go => aliasing_purego.go} | 3 +-
 .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 +
 vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 +
 vendor/golang.org/x/crypto/pkcs12/errors.go | 23 +
 .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 +
 vendor/golang.org/x/crypto/pkcs12/mac.go | 45 +
 vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 +
 vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 360 +
 vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 +
 .../x/crypto/poly1305/bits_compat.go | 1 +
 .../x/crypto/poly1305/bits_go1.13.go | 1 +
 .../golang.org/x/crypto/poly1305/mac_noasm.go | 3 +-
 .../golang.org/x/crypto/poly1305/sum_amd64.go | 3 +-
 .../golang.org/x/crypto/poly1305/sum_amd64.s | 2 +-
 .../x/crypto/poly1305/sum_ppc64le.go | 3 +-
 .../x/crypto/poly1305/sum_ppc64le.s | 2 +-
 .../golang.org/x/crypto/poly1305/sum_s390x.go | 3 +-
 .../golang.org/x/crypto/poly1305/sum_s390x.s | 2 +-
 vendor/golang.org/x/crypto/ssh/client_auth.go | 2 +-
 vendor/golang.org/x/crypto/ssh/kex.go | 13 +-
 vendor/golang.org/x/crypto/ssh/server.go | 4 +
 vendor/golang.org/x/image/ccitt/reader.go | 138 +-
 vendor/golang.org/x/image/ccitt/table.go | 41 +-
 vendor/golang.org/x/image/tiff/fuzz.go | 1 +
 vendor/golang.org/x/image/webp/decode.go | 17 +-
 vendor/golang.org/x/net/context/go17.go | 1 +
 vendor/golang.org/x/net/context/go19.go | 1 +
 vendor/golang.org/x/net/context/pre_go17.go | 1 +
 vendor/golang.org/x/net/context/pre_go19.go | 1 +
 vendor/golang.org/x/net/http2/Dockerfile | 2 +-
 vendor/golang.org/x/net/http2/go111.go | 1 +
 vendor/golang.org/x/net/http2/not_go111.go | 1 +
 vendor/golang.org/x/net/http2/server.go | 30 +-
 vendor/golang.org/x/net/idna/idna10.0.0.go | 1 +
 vendor/golang.org/x/net/idna/idna9.0.0.go | 1 +
 vendor/golang.org/x/net/idna/tables10.0.0.go | 1 +
 vendor/golang.org/x/net/idna/tables11.0.0.go | 1 +
 vendor/golang.org/x/net/idna/tables12.0.0.go | 1 +
 vendor/golang.org/x/net/idna/tables13.0.0.go | 1 +
 vendor/golang.org/x/net/idna/tables9.0.0.go | 1 +
 .../x/oauth2/google/appengine_gen1.go | 1 +
 .../x/oauth2/google/appengine_gen2_flex.go | 1 +
 vendor/golang.org/x/oauth2/google/default.go | 14 +-
 vendor/golang.org/x/oauth2/google/doc.go | 43 +-
 vendor/golang.org/x/oauth2/google/google.go | 25 +
 .../google/internal/externalaccount/aws.go | 466 +
 .../externalaccount/basecredentials.go | 163 +
 .../internal/externalaccount/clientauth.go | 41 +
 .../google/internal/externalaccount/err.go | 18 +
 .../externalaccount/filecredsource.go | 57 +
 .../internal/externalaccount/impersonate.go | 83 +
 .../internal/externalaccount/sts_exchange.go | 104 +
 .../internal/externalaccount/urlcredsource.go | 74 +
 .../x/oauth2/internal/client_appengine.go | 1 +
 vendor/golang.org/x/sys/unix/mkerrors.sh | 8 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go | 39 +-
 .../x/sys/unix/zerrors_darwin_amd64.go | 25 +
 .../x/sys/unix/zerrors_darwin_arm64.go | 25 +
 .../x/sys/unix/zerrors_freebsd_arm.go | 9 +
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 4 +
 .../x/sys/unix/zerrors_solaris_amd64.go | 3 +
 .../x/sys/unix/zerrors_zos_s390x.go | 1 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 15 +
 .../x/sys/windows/security_windows.go | 16 +-
 .../x/text/internal/language/language.go | 90 +-
 .../x/text/internal/language/parse.go | 37 +-
 vendor/golang.org/x/text/language/go1_1.go | 1 +
 vendor/golang.org/x/text/language/go1_2.go | 1 +
 vendor/golang.org/x/text/language/language.go | 4 +
 vendor/golang.org/x/text/language/tables.go | 8 +-
 .../x/text/secure/bidirule/bidirule10.0.0.go | 1 +
 .../x/text/secure/bidirule/bidirule9.0.0.go | 1 +
 vendor/golang.org/x/text/unicode/bidi/bidi.go | 221 +-
 vendor/golang.org/x/text/unicode/bidi/core.go | 63 +-
 .../x/text/unicode/bidi/tables10.0.0.go | 1 +
 .../x/text/unicode/bidi/tables11.0.0.go | 1 +
 .../x/text/unicode/bidi/tables12.0.0.go | 1 +
 .../x/text/unicode/bidi/tables13.0.0.go | 1 +
 .../x/text/unicode/bidi/tables9.0.0.go | 1 +
 .../x/text/unicode/norm/tables10.0.0.go | 1 +
 .../x/text/unicode/norm/tables11.0.0.go | 1 +
 .../x/text/unicode/norm/tables12.0.0.go | 1 +
 .../x/text/unicode/norm/tables13.0.0.go | 1 +
 .../x/text/unicode/norm/tables9.0.0.go | 1 +
 .../golang.org/x/text/width/tables10.0.0.go | 1 +
 .../golang.org/x/text/width/tables11.0.0.go | 1 +
 .../golang.org/x/text/width/tables12.0.0.go | 1 +
 .../golang.org/x/text/width/tables13.0.0.go | 1 +
 vendor/golang.org/x/text/width/tables9.0.0.go | 1 +
 .../api/compute/v1/compute-api.json | 2542 ++++-
 .../api/compute/v1/compute-gen.go | 9776 +++++++++++++++--
 .../api/storage/v1/storage-gen.go | 307 +-
 .../api/annotations/field_behavior.pb.go | 4 +-
 .../iam/credentials/v1/common.pb.go | 23 +-
 .../iam/credentials/v1/iamcredentials.pb.go | 9 +-
 .../genproto/googleapis/type/expr/expr.pb.go | 4 +-
 vendor/google.golang.org/grpc/Makefile | 30 +-
 vendor/google.golang.org/grpc/SECURITY.md | 3 +
 .../grpc/balancer/balancer.go | 10 +
 .../grpc/balancer/base/balancer.go | 53 +-
 .../grpc_lb_v1/load_balancer_grpc.pb.go | 10 +-
 .../balancer/grpclb/grpclb_remote_balancer.go | 20 +-
 .../grpc/balancer/grpclb/grpclb_util.go | 10 +-
 .../grpc/balancer_conn_wrappers.go | 8 +
 vendor/google.golang.org/grpc/clientconn.go | 6 +-
 .../grpc/credentials/alts/alts.go | 5 +-
 .../proto/grpc_gcp/handshaker_grpc.pb.go | 10 +-
 .../grpc/credentials/alts/utils.go | 94 -
 .../grpc/encoding/proto/proto.go | 14 +-
 vendor/google.golang.org/grpc/go.mod | 4 +-
 vendor/google.golang.org/grpc/go.sum | 14 +-
 .../grpc/internal/googlecloud/googlecloud.go | 128 +
 .../grpc/internal/grpcutil/target.go | 33 +-
 .../grpc/internal/internal.go | 6 +-
 .../grpc/internal/metadata/metadata.go | 50 +
 .../grpc/internal/resolver/config_selector.go | 77 +-
 .../grpc/internal/resolver/unix/unix.go | 28 +-
 .../grpc/internal/syscall/syscall_linux.go | 20 +-
 .../grpc/internal/syscall/syscall_nonlinux.go | 2 +-
 .../grpc/internal/transport/http2_client.go | 49 +-
 .../grpc/internal/transport/http2_server.go | 15 +
 .../grpc/internal/transport/http_util.go | 32 +
 .../grpc/internal/transport/transport.go | 3 +
 vendor/google.golang.org/grpc/pickfirst.go | 2 +-
 vendor/google.golang.org/grpc/regenerate.sh | 34 +-
 vendor/google.golang.org/grpc/rpc_util.go | 3 +-
 vendor/google.golang.org/grpc/server.go | 7 +
 .../google.golang.org/grpc/status/status.go | 8 +-
 vendor/google.golang.org/grpc/stream.go | 29 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/google.golang.org/grpc/vet.sh | 21 +-
 .../cmd/protoc-gen-go/internal_gengo/main.go | 35 +-
 .../internal_gengo/well_known_types.go | 7 +-
 .../protobuf/compiler/protogen/protogen.go | 347 +-
 .../protobuf/encoding/prototext/decode.go | 30 +-
 .../protobuf/encoding/prototext/encode.go | 84 +-
 .../protobuf/internal/descfmt/stringer.go | 2 +
 .../protobuf/internal/detrand/rand.go | 8 +
 .../encoding/messageset/messageset.go | 35 +-
 .../protobuf/internal/encoding/tag/tag.go | 2 +-
 .../protobuf/internal/encoding/text/encode.go | 8 +-
 .../protobuf/internal/fieldsort/fieldsort.go | 40 -
 .../protobuf/internal/filedesc/build.go | 3 +
 .../protobuf/internal/filedesc/desc.go | 77 +-
 .../protobuf/internal/filedesc/desc_lazy.go | 4 +-
 .../protobuf/internal/filedesc/desc_list.go | 172 +-
 .../internal/filedesc/desc_list_gen.go | 11 +
 .../protobuf/internal/impl/api_export.go | 2 +-
 .../protobuf/internal/impl/codec_field.go | 18 +-
 .../protobuf/internal/impl/codec_gen.go | 974 +-
 .../protobuf/internal/impl/codec_map.go | 19 +-
 .../protobuf/internal/impl/codec_message.go | 68 +-
 .../internal/impl/codec_messageset.go | 21 +-
 .../protobuf/internal/impl/codec_reflect.go | 8 +-
 .../protobuf/internal/impl/convert.go | 29 +
 .../protobuf/internal/impl/decode.go | 16 +-
 .../protobuf/internal/impl/encode.go | 10 +-
 .../protobuf/internal/impl/legacy_export.go | 2 +-
 .../internal/impl/legacy_extension.go | 3 +-
 .../protobuf/internal/impl/legacy_message.go | 122 +-
 .../protobuf/internal/impl/merge.go | 6 +-
 .../protobuf/internal/impl/message.go | 69 +-
 .../protobuf/internal/impl/message_reflect.go | 125 +-
 .../internal/impl/message_reflect_field.go | 85 +-
 .../protobuf/internal/impl/pointer_reflect.go | 1 +
 .../protobuf/internal/impl/pointer_unsafe.go | 1 +
 .../protobuf/internal/mapsort/mapsort.go | 43 -
 .../protobuf/internal/order/order.go | 89 +
 .../protobuf/internal/order/range.go | 115 +
 .../protobuf/internal/version/version.go | 2 +-
 .../protobuf/proto/decode.go | 18 +-
 .../protobuf/proto/decode_gen.go | 128 +-
 .../protobuf/proto/encode.go | 55 +-
 .../google.golang.org/protobuf/proto/equal.go | 25 +-
 .../protobuf/proto/messageset.go | 7 +-
 .../google.golang.org/protobuf/proto/proto.go | 9 +
 .../protobuf/reflect/protodesc/desc.go | 1 +
 .../protobuf/reflect/protodesc/desc_init.go | 4 +-
 .../reflect/protodesc/desc_validate.go | 3 +
 .../protobuf/reflect/protodesc/proto.go | 14 +-
 .../protobuf/reflect/protoreflect/source.go | 84 +-
 .../reflect/protoreflect/source_gen.go | 461 +
 .../protobuf/reflect/protoreflect/type.go | 34 +
 .../reflect/protoregistry/registry.go | 157 +-
 .../types/descriptorpb/descriptor.pb.go | 19 +-
 .../protobuf/types/known/anypb/any.pb.go | 22 +-
 .../types/known/durationpb/duration.pb.go | 20 +-
 .../types/known/timestamppb/timestamp.pb.go | 29 +-
 .../protobuf/types/pluginpb/plugin.pb.go | 63 +-
 vendor/gopkg.in/ini.v1/.travis.yml | 20 -
 vendor/gopkg.in/ini.v1/Makefile | 2 +-
 vendor/gopkg.in/ini.v1/README.md | 8 +-
 vendor/gopkg.in/ini.v1/codecov.yml | 9 +
 vendor/gopkg.in/ini.v1/data_source.go | 2 +
 vendor/gopkg.in/ini.v1/file.go | 173 +-
 vendor/gopkg.in/ini.v1/ini.go | 24 +-
 vendor/gopkg.in/ini.v1/key.go | 120 +-
 vendor/gopkg.in/ini.v1/parser.go | 25 +-
 vendor/gopkg.in/ini.v1/section.go | 14 +-
 vendor/gopkg.in/ini.v1/struct.go | 234 +-
 vendor/modules.txt | 164 +-
 639 files changed, 61738 insertions(+), 13311 deletions(-)
 create mode 100644 vendor/cloud.google.com/go/iam/credentials/apiv1/gapic_metadata.json
 delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go
 create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go
 create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go
 create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go
 create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
 create mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
 delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go
 delete mode 100644 vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go
 create mode 100644 vendor/github.com/Azure/go-autorest/.gitignore
 create mode 100644 vendor/github.com/Azure/go-autorest/CHANGELOG.md
 create mode 100644 vendor/github.com/Azure/go-autorest/GNUmakefile
 create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.lock
 create mode 100644 vendor/github.com/Azure/go-autorest/Gopkg.toml
 create mode 100644 vendor/github.com/Azure/go-autorest/LICENSE
 create mode 100644 vendor/github.com/Azure/go-autorest/README.md
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/LICENSE
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/README.md
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/config.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.mod
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go.sum
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/persist.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/adal/version.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/autorest.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/async.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/client.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/LICENSE
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/date.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go.mod
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go.sum
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/time.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/date/utility.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/error.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.mod
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go.sum
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/preparer.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/responder.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/sender.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go
 create mode 100644 vendor/github.com/Azure/go-autorest/autorest/version.go
 create mode 100644 vendor/github.com/Azure/go-autorest/azure-pipelines.yml
 create mode 100644 vendor/github.com/Azure/go-autorest/doc.go
 create mode 100644 vendor/github.com/Azure/go-autorest/logger/LICENSE
 create mode 100644 vendor/github.com/Azure/go-autorest/logger/go.mod
 create mode 100644 vendor/github.com/Azure/go-autorest/logger/go.sum
 create mode 100644 vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go
 create mode 100644 vendor/github.com/Azure/go-autorest/logger/logger.go
 create mode 100644 vendor/github.com/Azure/go-autorest/tracing/LICENSE
 create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go.mod
 create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go.sum
 create mode 100644 vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go
 create mode 100644 vendor/github.com/Azure/go-autorest/tracing/tracing.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/bundler/debug.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/graph/graph.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/graph/input.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/graph/meta.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/helpers/bitset.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/helpers/joiner.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/helpers/mime.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/resolver/dataurl.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/resolver/package_json.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/xxhash/LICENSE.txt
 create mode 100644 vendor/github.com/evanw/esbuild/internal/xxhash/README.md
 create mode 100644 vendor/github.com/evanw/esbuild/internal/xxhash/xxhash.go
 create mode 100644 vendor/github.com/evanw/esbuild/internal/xxhash/xxhash_other.go
 create mode 100644 vendor/github.com/evanw/esbuild/pkg/api/serve_other.go
 create mode 100644 vendor/github.com/evanw/esbuild/pkg/api/serve_wasm.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/.gitignore
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/.travis.yml
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/LICENSE
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/MIGRATION_GUIDE.md
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/README.md
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/VERSION_HISTORY.md
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/claims.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/doc.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/ecdsa.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/ecdsa_utils.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/errors.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/hmac.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/map_claims.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/none.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/parser.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/rsa.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/rsa_pss.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/rsa_utils.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/signing_method.go
 create mode 100644 vendor/github.com/form3tech-oss/jwt-go/token.go
 delete mode 100644 vendor/github.com/googleapis/gax-go/.gitignore
 delete mode 100644 vendor/github.com/googleapis/gax-go/.travis.yml
 delete mode 100644 vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md
 delete mode 100644 vendor/github.com/googleapis/gax-go/CONTRIBUTING.md
 delete mode 100644 vendor/github.com/googleapis/gax-go/LICENSE
 delete mode 100644 vendor/github.com/googleapis/gax-go/README.md
 delete mode 100644 vendor/github.com/googleapis/gax-go/call_option.go
 delete mode 100644 vendor/github.com/googleapis/gax-go/gax.go
 delete mode 100644 vendor/github.com/googleapis/gax-go/header.go
 delete mode 100644 vendor/github.com/googleapis/gax-go/invoke.go
 create mode 100644 vendor/github.com/josharian/intern/README.md
 create mode 100644 vendor/github.com/josharian/intern/go.mod
 create mode 100644 vendor/github.com/josharian/intern/intern.go
 create mode 100644 vendor/github.com/josharian/intern/license.md
 rename vendor/github.com/magiconair/properties/{LICENSE => LICENSE.md} (84%)
 create mode 100644 vendor/github.com/mattn/go-runewidth/go.sum
 delete mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml
 delete mode 100644 vendor/github.com/pelletier/go-toml/fuzzit.sh
 delete mode 100644 vendor/github.com/pelletier/go-toml/go.sum
 create mode 100644 vendor/github.com/pelletier/go-toml/tomlpub.go
 create mode 100644 vendor/github.com/pelletier/go-toml/tomltree_writepub.go
 rename vendor/github.com/{shurcooL/sanitized_anchor_name/LICENSE => rivo/uniseg/LICENSE.txt} (96%)
 create mode 100644 vendor/github.com/rivo/uniseg/README.md
 create mode 100644 vendor/github.com/rivo/uniseg/doc.go
 create mode 100644 vendor/github.com/rivo/uniseg/go.mod
 create mode 100644 vendor/github.com/rivo/uniseg/grapheme.go
 create mode 100644 vendor/github.com/rivo/uniseg/properties.go
 create mode 100644 vendor/github.com/russross/blackfriday/v2/entities.go
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/README.md
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/go.mod
 delete mode 100644 vendor/github.com/shurcooL/sanitized_anchor_name/main.go
 create mode 100644 vendor/github.com/spf13/afero/iofs.go
 create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml
 create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md
 delete mode 100644 vendor/go.opencensus.io/.travis.yml
 create mode 100644 vendor/go.opencensus.io/trace/trace_api.go
 rename vendor/golang.org/x/crypto/internal/subtle/{aliasing_appengine.go => aliasing_purego.go} (97%)
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go
 create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/err.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/impersonate.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go
 create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go
 create mode 100644 vendor/google.golang.org/grpc/SECURITY.md
 create mode 100644 vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go
 create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/order/order.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/order/range.go
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
 delete mode 100644 vendor/gopkg.in/ini.v1/.travis.yml
 create mode 100644 vendor/gopkg.in/ini.v1/codecov.yml

diff --git a/go.mod b/go.mod
index fae4c4f..25c7036 100644
--- a/go.mod
+++ b/go.mod
@@ -3,11 +3,47 @@ module git.sandpoints.org/Drawwell/SandpointsGitHook
 go 1.16
 
 require (
+	cloud.google.com/go/storage v1.14.0 // indirect
+	github.com/Azure/go-autorest/autorest v0.11.18 // indirect
 	github.com/PumpkinSeed/cage v0.1.0
+	github.com/aws/aws-sdk-go v1.38.21 // indirect
+	github.com/evanw/esbuild v0.11.12 // indirect
+	github.com/getkin/kin-openapi v0.55.0 // indirect
 	github.com/go-git/go-git/v5 v5.2.0
-	github.com/gohugoio/hugo v0.81.0
+	github.com/go-openapi/swag v0.19.15 // indirect
+	github.com/gohugoio/hugo v0.82.0
+	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+	github.com/google/uuid v1.2.0 // indirect
+	github.com/google/wire v0.5.0 // indirect
+	github.com/jdkato/prose v1.2.1 // indirect
+	github.com/magiconair/properties v1.8.5 // indirect
+	github.com/mailru/easyjson v0.7.7 // indirect
+	github.com/mattn/go-runewidth v0.0.12 // indirect
+	github.com/mitchellh/hashstructure v1.1.0 // indirect
+	github.com/mitchellh/mapstructure v1.4.1 // indirect
+	github.com/nicksnyder/go-i18n/v2 v2.1.2 // indirect
+	github.com/niklasfasching/go-org v1.5.0 // indirect
+	github.com/pelletier/go-toml v1.9.0 // indirect
+	github.com/rivo/uniseg v0.2.0 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/russross/blackfriday v1.6.0 // indirect
+	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/spf13/afero v1.6.0 // indirect
+	github.com/spf13/cobra v1.1.3 // indirect
 	github.com/spf13/viper v1.7.1
+	github.com/tdewolff/minify/v2 v2.9.16 // indirect
+	github.com/tdewolff/parse/v2 v2.5.15 // indirect
+	github.com/yuin/goldmark v1.3.5 // indirect
+	gocloud.dev v0.22.0 // indirect
+	golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc // indirect
+	golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb // indirect
 	golang.org/x/mod v0.4.2 // indirect
-	golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 // indirect
+	golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d // indirect
+	golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 // indirect
+	golang.org/x/sys v0.0.0-20210415045647-66c3f260301c // indirect
 	golang.org/x/tools v0.1.1-0.20210319172145-bda8f5cee399 // indirect
+	google.golang.org/api v0.44.0 // indirect
+	google.golang.org/genproto v0.0.0-20210416161957-9910b6c460de // indirect
+	google.golang.org/grpc v1.37.0 // indirect
+	gopkg.in/ini.v1 v1.62.0 // indirect
 )
diff --git a/go.sum b/go.sum
index cd446df..7ecb81f 100644
--- a/go.sum
+++ b/go.sum
@@ -18,9 +18,14 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ
 cloud.google.com/go v0.58.0/go.mod h1:W+9FnSUw6nhVwXlFcp1eL+krq5+HQUJeUogSeJZZiWg=
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko=
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go v0.74.0 h1:kpgPA77kSSbjSs+fWHkPTxQ6J5Z2Qkruo5jfXEkHxNQ=
 cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -31,56 +36,95 @@ cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 cloud.google.com/go/firestore v1.2.0/go.mod h1:iISCjWnTpnoJT1R287xRdjvQHJrxQOpeah4phb5D3h0=
+cloud.google.com/go/firestore v1.4.0/go.mod h1:NjjGEnxCS3CAKYp+vmALu20QzcqasGodQp48WxJGAYc=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
 cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.9.0/go.mod h1:G3o6/kJvEMIEAN5urdkaP4be49WQsjNiykBIto9LFtY=
 cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
 cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU=
-cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.12.0/go.mod h1:fFLk2dp2oAhDz8QFKwqrjdJvxSp/W2g7nillojlL5Ho=
+cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
 contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
+contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
 contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
+contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
 contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
+contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
 contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg=
+github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
+github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
 github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
-github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY=
 github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
 github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v49.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-service-bus-go v0.10.1/go.mod h1:E/FOceuKAFUfpbIJDKWz/May6guE+eGibfGT6q+n1to=
-github.com/Azure/azure-storage-blob-go v0.9.0 h1:kORqvzXP8ORhKbW13FflGUaSE5CMyDWun9UwMxY8gPs=
+github.com/Azure/azure-service-bus-go v0.10.7/go.mod h1:o5z/3lDG1iT/T/G7vgIwIqVDTx9Qa2wndf5OdzSzpF8=
 github.com/Azure/azure-storage-blob-go v0.9.0/go.mod h1:8UBPbiOhrMQ4pLPi3gA1tXnpjrS76UYE/fo5A40vf4g=
+github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc=
+github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs=
 github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
 github.com/Azure/go-amqp v0.12.7/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo=
+github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs=
+github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4=
 github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.7/go.mod h1:V6p3pKZx1KKkJubbxnDWrzNhEIfOy/pTGasLqzHIPHs=
+github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
 github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/adal v0.8.3 h1:O1AGG9Xig71FxdX9HO5pGNyZ7TbSyHaVg+5eJO/jSGw=
 github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
-github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
+github.com/Azure/go-autorest/autorest/adal v0.9.4/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.6/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
 github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
-github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.3 h1:lZifaPRAk1bqg5vGqreL6F8uLC5V0fDpY8nFvc3boFc=
+github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A=
 github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY=
+github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
 github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
 github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
 github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
 github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
 github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
 github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
-github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY=
+github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
 github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69 h1:+tu3HOoMXB7RXEINRVIpxJCT+KdYiI7LAEAUrOw3dIU=
 github.com/BurntSushi/locker v0.0.0-20171006230638-a6e239ea1c69/go.mod h1:L1AbZdiDllfyYH5l5OkAaZtk7VkWe89bPJFmnDBNHxg=
 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
@@ -89,6 +133,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
 github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
 github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
+github.com/GoogleCloudPlatform/cloudsql-proxy v1.19.1/go.mod h1:+yYmuKqcBVkgRePGpUhTA9OEg0XsnFE96eZ6nJ2yCQM=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -126,9 +171,12 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.31.13/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
-github.com/aws/aws-sdk-go v1.37.11 h1:W1gUQxt6jmiUsk2jkTVAlYsd3Sg8bNL2VDcWjrXmD+0=
+github.com/aws/aws-sdk-go v1.36.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.37.11/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.38.21 h1:D08DXWI4QRaawLaW+OtsIEClOI90I6eheJs1GwXTQVI=
+github.com/aws/aws-sdk-go v1.38.21/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo=
@@ -145,6 +193,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -155,6 +204,7 @@ github.com/cli/safeexec v1.0.0/go.mod h1:Z/D4tTN8Vs5gXYHDCbaM1S/anmEDnJb1iW0+EJ5
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -171,12 +221,13 @@ github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
+github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
 github.com/disintegration/gift v1.2.1 h1:Y005a1X4Z7Uc+0gLpSAsKhWi4qLtsdEcMIbbdvdZ6pc=
 github.com/disintegration/gift v1.2.1/go.mod h1:Jh2i7f7Q2BM7Ezno3PhfezbR1xpUg9dUg3/RlKGr4HI=
 github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
@@ -191,11 +242,16 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanw/esbuild v0.8.46 h1:RlryMOkj9pbbRog5IbHoVc5i6go4RIxa9BCcFDBozvA=
-github.com/evanw/esbuild v0.8.46/go.mod h1:y2AFBAGVelPqPodpdtxWWqe6n2jYf5FrsJbligmRmuw=
+github.com/evanw/esbuild v0.9.6/go.mod h1:y2AFBAGVelPqPodpdtxWWqe6n2jYf5FrsJbligmRmuw=
+github.com/evanw/esbuild v0.11.12 h1:qbgiZa7efve0zCGhcKoy84JbIKy/Rp85hG4h1PYvFgo=
+github.com/evanw/esbuild v0.11.12/go.mod h1:y2AFBAGVelPqPodpdtxWWqe6n2jYf5FrsJbligmRmuw=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
 github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
@@ -206,10 +262,13 @@ github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/getkin/kin-openapi v0.39.0 h1:Dy0xS0Vct2bmAIXsGq/ioagVQStca9QUBD3/4XpAORA=
 github.com/getkin/kin-openapi v0.39.0/go.mod h1:ZJSfy1PxJv2QQvH9EdBj3nupRTVvV42mkW6zKUlRBwk=
+github.com/getkin/kin-openapi v0.55.0 h1:QD3oyPC7NJ+FgePucVN4FplV0llQbXuxyXeNrk9YUqc=
+github.com/getkin/kin-openapi v0.55.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
 github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
 github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
@@
-229,26 +288,36 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/flect v0.2.2 h1:PAVD7sp0KOdfswjAw9BpLCU9hXo7wFSzgpQ+zNeks/A= github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gohugoio/hugo v0.81.0 h1:PX8TYe1nrWOOMBWtQ/YvKs6QRrOjC5/RrZGE4tBb6EE= -github.com/gohugoio/hugo v0.81.0/go.mod h1:YKSyYjGD3DBL2EChKEz0VXeK35qrHTNRzOJYOl4/zS0= +github.com/gohugoio/hugo v0.82.0 h1:uBHlvDRcSR+HFDc6xuHqmtQy6MnXks3swohh6ywU2os= +github.com/gohugoio/hugo v0.82.0/go.mod h1:SvswTKhsnEgzBe5pQ7G7bCbhEucfWHHbhZ7Jx1Yiy8E= github.com/gohugoio/testmodBuilder/mods v0.0.0-20190520184928-c56af20f2e95 h1:sgew0XCnZwnzpWxTt3V8LLiCO7OQi3C6dycaE67wfkU= github.com/gohugoio/testmodBuilder/mods v0.0.0-20190520184928-c56af20f2e95/go.mod h1:bOlVlCa1/RajcHpXkrUXPSHB/Re1UnlXxD1Qp8SKOd8= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -256,6 +325,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -269,8 +339,11 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -281,12 +354,17 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-replayers/grpcreplay v0.1.0 h1:eNb1y9rZFmY4ax45uEEECSa8fsxGRU+8Bil52ASAwic= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= -github.com/google/go-replayers/httpreplay v0.1.0 h1:AX7FUb4BjrrzNvblr/OlgwrmFiep6soj5K2QSDW7BGk= +github.com/google/go-replayers/grpcreplay v1.0.0 
h1:B5kVOzJ1hBgnevTgIWhSTatQ3608yu/2NnU0Ta1d0kY= +github.com/google/go-replayers/grpcreplay v1.0.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= +github.com/google/go-replayers/httpreplay v0.1.2 h1:HCfx+dQzwN9XbGTHF8qJ+67WN8glL9FTWV5rraCJ/jU= +github.com/google/go-replayers/httpreplay v0.1.2/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible h1:xmapqc1AyLoB+ddYT6r04bD9lIjlOqGaREovi0SzFaE= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -301,15 +379,21 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/wire v0.4.0 h1:kXcsA/rIGzJImVqPdhfnr6q0xsS9gU0515q1EPpJ9fE= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= +github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8= +github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -320,7 +404,9 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/csrf v1.6.0/go.mod h1:7tSf8kmjNYr7IWDCYhd3U8Ck34iQ/Yw5CJu7bAkHEGI= github.com/gorilla/handlers v1.4.1/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= 
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -355,8 +441,9 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jdkato/prose v1.2.0 h1:t/R3H6xOrVuIgNevWiOSJf1kEoeF2VWlrN6w76Tkzow= github.com/jdkato/prose v1.2.0/go.mod h1:WC4YKHtBdAMgBdmfdqBmEuVbBD0U5c9HQ6l1U8Cq0ts= +github.com/jdkato/prose v1.2.1 h1:Fp3UnJmLVISmlc57BgKUzdjr0lOtjqTZicL3PaYy6cU= +github.com/jdkato/prose v1.2.1/go.mod h1:AiRHgVagnEx2JbQRQowVBKjG0bcs/vtkGCH1dYAL1rA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -367,7 +454,10 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -378,6 +468,8 @@ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -390,15 +482,20 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyokomi/emoji/v2 v2.2.7 h1:v12BHO3OBfwokUfaQghK4RSDFpG4WfxeKJPSdcc73Nk= -github.com/kyokomi/emoji/v2 v2.2.7/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE= +github.com/kyokomi/emoji/v2 v2.2.8 h1:jcofPxjHWEkJtkIbcLHvZhxKgCPl6C7MyjTrD4KDqUE= +github.com/kyokomi/emoji/v2 v2.2.8/go.mod h1:JUcn42DTdsXJo1SWanHh4HKDEyPaR5CqkmoirZZP9qE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matryer/try v0.0.0-20161228173917-9ac251b645a2/go.mod h1:0KeJpeMD6o+O4hW7qJOT7vyQPKrWmj26uf5wMc/IiIs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -410,8 +507,9 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/mmark v1.3.6 h1:t47x5vThdwgLJzofNsbsAl7gmIiJ7kbDQN5BxwBmwvY= @@ -422,35 +520,45 @@ github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= +github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.6.3/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/muesli/smartcrop v0.3.0 h1:JTlSkmxWg/oQ1TcLDoypuirdE8Y/jzNirQeLkxpA6Oc= github.com/muesli/smartcrop v0.3.0/go.mod h1:i2fCI/UorTfgEpPPLWiFBv4pye+YAG78RwcQLUkocpI= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/neurosnap/sentences v1.0.6/go.mod h1:pg1IapvYpWCJJm/Etxeh0+gtMf1rI1STY9S7eUCPbDc= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 h1:zYyBkD/k9seD2A7fsi6Oo2LfFZAehjjQMERAvZLEDnQ= github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S/RLdc7JQKbRpFeM1dOBd8T9ki5s+AY8= -github.com/nicksnyder/go-i18n/v2 v2.1.1 h1:ATCOanRDlrfKVB4WHAdJnLEqZtDmKYsweqsOUYflnBU= github.com/nicksnyder/go-i18n/v2 v2.1.1/go.mod h1:d++QJC9ZVf7pa48qrsRWhMJ5pSHIPmS3OLqK1niyLxs= +github.com/nicksnyder/go-i18n/v2 v2.1.2 h1:QHYxcUJnGHBaq7XbvgunmZ2Pn0focXFqTD61CkH146c= +github.com/nicksnyder/go-i18n/v2 v2.1.2/go.mod h1:d++QJC9ZVf7pa48qrsRWhMJ5pSHIPmS3OLqK1niyLxs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/niklasfasching/go-org v1.4.0 h1:qPy4VEdX55f5QcLiaD3X7N/tY5XOgk4y2uEyQa02i7A= github.com/niklasfasching/go-org v1.4.0/go.mod h1:4FWT4U/Anir9ewjwNpbZIzMjG5RaXFafkyWZNEPRdk8= +github.com/niklasfasching/go-org v1.5.0 h1:V8IwoSPm/d61bceyWFxxnQLtlvNT+CjiYIhtZLdnMF0= +github.com/niklasfasching/go-org 
v1.5.0/go.mod h1:sSb8ylwnAG+h8MGFDB3R1D5bxf8wA08REfhjShg3kjA= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0= +github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -470,14 +578,20 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 h1:tlXG832s5pa9x9Gs3Rp2rTvEqjiDEuETUOSfBEiTcns= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc= 
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -488,7 +602,6 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shogo82148/go-shuffle v0.0.0-20180218125048-27e6095f230d/go.mod h1:2htx6lmL0NGLHlO8ZCf+lQBGBHIbEujyywxJArf+2Yc= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= @@ -498,13 +611,15 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.5.1 h1:VHu76Lk0LSP1x254maIu2bplkWpfBWI+B+6fdoZprcg= github.com/spf13/afero v1.5.1/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/fsync v0.9.0 h1:f9CEt3DOB2mnHxZaftmEOFWjABEvKM/xpf3cUwJrGOY= github.com/spf13/fsync v0.9.0/go.mod h1:fNtJEfG3HiltN3y4cPOz6MLjos9+2pIEqLIgszqhp/0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -528,13 +643,17 @@ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tdewolff/minify/v2 v2.9.13 h1:RrwQhgGoYBhKN/ezStGB+crU64wPK1ZE5Jmkl63lif0= -github.com/tdewolff/minify/v2 v2.9.13/go.mod h1:faNOp+awAoo+fhFHD+NAkBOaXBAvJI2X2SDERGKnARo= -github.com/tdewolff/parse/v2 v2.5.10 h1:vj35n+ljq8LuYUx436s4qB18wuwP7thrLv+t1syE39M= -github.com/tdewolff/parse/v2 v2.5.10/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho= +github.com/tdewolff/minify/v2 v2.9.15/go.mod h1:tK4qPnHUZgANtEGVMwTBxrF1eNIBkigHFYo7F3Y98GQ= +github.com/tdewolff/minify/v2 v2.9.16 h1:2Pv8pFRX/ZfjTRYX2xzcuNrkEJqU5TfriNJJYOeN3rI= +github.com/tdewolff/minify/v2 v2.9.16/go.mod 
h1:cjMkr4ZgFjqxXAQ1kR9Fm4l1046mmONd2g6yMzGuN/w= +github.com/tdewolff/parse/v2 v2.5.14/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho= +github.com/tdewolff/parse/v2 v2.5.15 h1:hYZKJZ0KfHMGhN3+hER4R9gQM/umJThkeeyJNtsO86o= +github.com/tdewolff/parse/v2 v2.5.15/go.mod h1:WzaJpRSbwq++EIQHYIRTpbYKNA3gn9it1Ik++q4zyho= github.com/tdewolff/test v1.0.6 h1:76mzYJQ83Op284kMT+63iCNCI7NEERsIN8dLM+RiKr4= github.com/tdewolff/test v1.0.6/go.mod h1:6DAvZliBAAnD7rhVgwaM7DE5/d9NMOAJ09SqYqeK4QE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= @@ -546,8 +665,9 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.2 h1:YjHC5TgyMmHpicTgEqDN0Q96Xo8K6tLXPnmNOHXCgs0= github.com/yuin/goldmark v1.3.2/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.3.5 h1:dPmz1Snjq0kmkz159iL7S6WzdahUTHnHB5M56WFVifs= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark-highlighting v0.0.0-20200307114337-60d527fdb691 h1:VWSxtAiQNh3zgHJpdpkpVYjTPqRE3P6UZCOPa1nRDio= github.com/yuin/goldmark-highlighting v0.0.0-20200307114337-60d527fdb691/go.mod h1:YLF3kDffRfUH/bTxOxHhV6lxwIB3Vfj91rEwNMS9MXo= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -557,25 +677,32 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -gocloud.dev v0.20.0 h1:mbEKMfnyPV7W1Rj35R1xXfjszs9dXkwSOq2KoFr25g8= gocloud.dev v0.20.0/go.mod h1:+Y/RpSXrJthIOM8uFNzWp6MRu9pFPNFEEZrQMxpkfIc= +gocloud.dev v0.22.0 h1:psFb4EJ+bF9bjns7XR3n3tMMMB1LNs97YURcyh4oVWM= +gocloud.dev v0.22.0/go.mod h1:z3jKIQ0Es9LALVZFQ3wOvwqAsSLq1R5c/2RdmghDucw= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc h1:+q90ECDSAQirdykUN6sPEiBXBsp8Csjcca8Oy7bgLTA= +golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -588,8 +715,9 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.0.0-20191214001246-9130b4cfad52 h1:2fktqPPvDiVEEVT/vSTeoUPXfmRxRaGy6GU8jypvEn0= golang.org/x/image v0.0.0-20191214001246-9130b4cfad52/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb h1:fqpd0EBDzlHRCjiphRR5Zo/RSWWQlWv34418dnEixWk= +golang.org/x/image v0.0.0-20210220032944-ac19c3e999fb/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -632,6 +760,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -648,12 +777,18 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d h1:BgJvlyh+UqCUaPlscHJ+PN8GcpfrFdr7NHjd1JL0+Gs= +golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -662,8 +797,14 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 h1:Lm4OryKCca1vehdsWogr9N4t7NfZxLbJoc/H0w4K4S4= +golang.org/x/oauth2 v0.0.0-20201203001011-0b49973bad19/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 h1:rPRtHfUb0UKZeZ6GH4K4Nt4YRbE9V1u+QZX5upZXqJQ= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -673,8 +814,9 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -693,6 +835,7 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -715,25 +858,40 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210415045647-66c3f260301c h1:6L+uOeS3OQt/f4eFHXZcTxeZrGCuz+CLElgEBjbcTA4= +golang.org/x/sys v0.0.0-20210415045647-66c3f260301c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -751,6 +909,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -783,11 +942,18 @@ golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201203202102-a1a1cbeaa516/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210319172145-bda8f5cee399 h1:O5bm8buX/OaamnfcBrkjn0SPUIU30jFmaS8lP+ikkxs= golang.org/x/tools v0.1.1-0.20210319172145-bda8f5cee399/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -800,6 +966,7 @@ google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= 
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -813,14 +980,20 @@ google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= @@ -860,17 +1033,30 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d h1:HV9Z9qMhQEsdlvxNFELgQ11RkMzO3CMkjEySjCtuLes= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210416161957-9910b6c460de h1:+nG/xknR+Gc5ByHOtK1dT0Pl3LYo8NLR+Jz3XeBeGEg= +google.golang.org/genproto v0.0.0-20210416161957-9910b6c460de/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -880,9 +1066,14 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -892,8 +1083,10 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -902,8 +1095,9 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8X gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.1 h1:GyboHr4UqMiLUybYjd22ZjQIKEJEpgtLXtuGbR21Oho= gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/neurosnap/sentences.v1 v1.0.6/go.mod h1:YlK+SN+fLQZj+kY3r8DkGDhDr91+S3JmTb5LSxFRQo0= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= @@ -916,8 +1110,9 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -925,6 +1120,7 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/main.go b/main.go index 77d8ae4..1338413 100644 --- a/main.go +++ b/main.go @@ -15,7 +15,7 @@ import ( ) var ( - version = "21.04.04" + version = "21.04.05" logLines = "" ) diff --git a/metahugo.go b/metahugo.go index d2acf0b..98c60c8 100644 --- a/metahugo.go +++ b/metahugo.go @@ -1,6 +1,7 @@ package main import ( + "fmt" "hash/crc32" "path/filepath" "regexp" @@ -10,6 +11,7 @@ import ( "github.com/PumpkinSeed/cage" "github.com/gohugoio/hugo/commands" + hg "github.com/gohugoio/hugo/common/hugo" "github.com/spf13/viper" ) @@ -29,23 +31,30 @@ func hugoContext(hugo *Hugo, gitRepoPath string) { hugo.DestinationDir = filepath.Join(hugo.SourceDir, "public") } -func hugoRender(hugo *Hugo, hook *Hook) string { - // fmt.Println("hugo", "-s", hugo.SourceDir, "-d", hugo.DestinationDir, "--templateMetrics") +func hugoRun(hugoCommands []string) []string { logs := cage.Start() - hugoCommand := []string{"-s", hugo.SourceDir, "-d", hugo.DestinationDir, "--templateMetrics"} - if hook.Offline { - hugoCommand = append(hugoCommand, []string{"-e", "offline"}...) - } else if hook.Context == "PostReceive" { - hugoCommand = append(hugoCommand, []string{"-e", "gitea"}...) - } + hugoCommand := hugoCommands goHugo := commands.Execute(hugoCommand) cage.Stop(logs) if goHugo.Err != nil { check(goHugo.Err) runtime.Goexit() } - lgs := "~~~~~~~ Hugo's logs ~~~~~~~\n" - for _, l := range logs.Data { + return logs.Data +} + +func hugoRender(hugo *Hugo, hook *Hook) string { + // fmt.Println("hugo", "-s", hugo.SourceDir, "-d", hugo.DestinationDir, "--templateMetrics") + hugoCommands := []string{"-s", hugo.SourceDir, "-d", hugo.DestinationDir, "--templateMetrics"} + if hook.Offline { + hugoCommands = append(hugoCommands, []string{"-e", "offline"}...) + } else if hook.Context == "PostReceive" { + hugoCommands = append(hugoCommands, []string{"-e", "gitea"}...) + } + logs := hugoRun(hugoCommands) + + lgs := fmt.Sprintf("~~~~~~~ Hugo's logs (v%.2f)~~~~~~~\n", hg.CurrentVersion.Number) + for _, l := range logs { lgs = lgs + l + "\n" } return lgs diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 6befd9f..6fc75f1 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,6 +1,194 @@ # Changes +## [0.81.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.80.0...v0.81.0) (2021-04-02) + + +### Features + +* **datacatalog:** Policy Tag Manager v1 API service feat: new RenameTagTemplateFieldEnumValue API feat: adding fully_qualified_name in lookup and search feat: added DATAPROC_METASTORE integrated system along with new entry types: DATABASE and SERVICE docs: Documentation improvements ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. doc: clarify resource format for session response. ([a0b1f6f](https://www.github.com/googleapis/google-cloud-go/commit/a0b1f6faae77d014fdee166ab018ddcd6f846ab4)) +* **dialogflow/cx:** include original user query in WebhookRequest; add GetTextCaseresult API. 
doc: clarify resource format for session response. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **dialogflow:** expose MP3_64_KBPS and MULAW for output audio encodings. ([b5b4da6](https://www.github.com/googleapis/google-cloud-go/commit/b5b4da6952922440d03051f629f3166f731dfaa3)) +* **secretmanager:** Rotation for Secrets ([2b02a03](https://www.github.com/googleapis/google-cloud-go/commit/2b02a03ff9f78884da5a8e7b64a336014c61bde7)) + + +### Bug Fixes + +* **internal/godocfx:** filter out non-Cloud ([#3878](https://www.github.com/googleapis/google-cloud-go/issues/3878)) ([625aef9](https://www.github.com/googleapis/google-cloud-go/commit/625aef9b47181cf627587cc9cde9e400713c6678)) + +## [0.80.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.79.0...v0.80.0) (2021-03-23) + + +### ⚠ BREAKING CHANGES + +* **all:** This is a breaking change in dialogflow + +### Features + +* **appengine:** added vm_liveness, search_api_available, network_settings, service_account, build_env_variables, kms_key_reference to v1 API ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **assuredworkloads:** Add 'resource_settings' field to provide custom properties (ids) for the provisioned projects. ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) +* **assuredworkloads:** add HIPAA and HITRUST compliance regimes ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) +* **dialogflow/cx:** added fallback option when restoring an agent docs: clarified experiment length ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) +* **dialogflow/cx:** start generating apiv3 ([#3850](https://www.github.com/googleapis/google-cloud-go/issues/3850)) ([febbdcf](https://www.github.com/googleapis/google-cloud-go/commit/febbdcf13fcea3f5d8186c3d3dface1c0d27ef9e)), refs [#3634](https://www.github.com/googleapis/google-cloud-go/issues/3634) +* **documentai:** add EVAL_SKIPPED value to the Provenance.OperationType enum in document.proto. 
([cb43066](https://www.github.com/googleapis/google-cloud-go/commit/cb4306683926843f6e977f207fa6070bb9242a61)) +* **documentai:** start generating apiv1 ([#3853](https://www.github.com/googleapis/google-cloud-go/issues/3853)) ([d68e604](https://www.github.com/googleapis/google-cloud-go/commit/d68e604c953eea90489f6134e71849b24dd0fcbf)) +* **internal/godocfx:** add prettyprint class to code blocks ([#3819](https://www.github.com/googleapis/google-cloud-go/issues/3819)) ([6e49f21](https://www.github.com/googleapis/google-cloud-go/commit/6e49f2148b116ee439c8a882dcfeefb6e7647c57)) +* **internal/godocfx:** handle Markdown content ([#3816](https://www.github.com/googleapis/google-cloud-go/issues/3816)) ([56d5d0a](https://www.github.com/googleapis/google-cloud-go/commit/56d5d0a900197fb2de46120a0eda649f2c17448f)) +* **kms:** Add maxAttempts to retry policy for KMS gRPC service config feat: Add Bazel exports_files entry for KMS gRPC service config ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **resourcesettings:** start generating apiv1 ([#3854](https://www.github.com/googleapis/google-cloud-go/issues/3854)) ([3b288b4](https://www.github.com/googleapis/google-cloud-go/commit/3b288b4fa593c6cb418f696b5b26768967c20b9e)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([fd04a55](https://www.github.com/googleapis/google-cloud-go/commit/fd04a552213f99619c714b5858548f61f4948493)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([cd70aa9](https://www.github.com/googleapis/google-cloud-go/commit/cd70aa9cc1a5dccfe4e49d2d6ca6db2119553c86)) +* **speech:** Support output transcript to GCS for LongRunningRecognize. ([35a8706](https://www.github.com/googleapis/google-cloud-go/commit/35a870662df8bf63c4ec10a0233d1d7a708007ee)) + + +### Miscellaneous Chores + +* **all:** auto-regenerate gapics ([#3837](https://www.github.com/googleapis/google-cloud-go/issues/3837)) ([ab4824a](https://www.github.com/googleapis/google-cloud-go/commit/ab4824a7914864228e59b244d6382de862139524)) + +## [0.79.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.78.0...v0.79.0) (2021-03-10) + + +### Features + +* **apigateway:** start generating apiv1 ([#3726](https://www.github.com/googleapis/google-cloud-go/issues/3726)) ([66046da](https://www.github.com/googleapis/google-cloud-go/commit/66046da2a4be5971ce2655dc6a5e1fadb08c3d1f)) +* **channel:** addition of billing_account field on Plan. docs: clarification that valid address lines are required for all customers. ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) +* **dialogflow/cx:** allow to disable webhook invocation per request ([d4246aa](https://www.github.com/googleapis/google-cloud-go/commit/d4246aad4da3c3ef12350385f229bb908e3fb215)) +* **dialogflow/cx:** allow to disable webhook invocation per request ([44c6bf9](https://www.github.com/googleapis/google-cloud-go/commit/44c6bf986f39a3c9fddf46788ae63bfbb3739441)) +* **dialogflow:** Add CCAI API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **documentai:** remove the translation fields in document.proto. 
([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **documentai:** Update documentai/v1beta3 protos: add support for boolean normalized value ([529925b](https://www.github.com/googleapis/google-cloud-go/commit/529925ba79f4d3191ef80a13e566d86210fe4d25)) +* **internal/godocfx:** keep some cross links on same domain ([#3767](https://www.github.com/googleapis/google-cloud-go/issues/3767)) ([77f76ed](https://www.github.com/googleapis/google-cloud-go/commit/77f76ed09cb07a090ba9054063a7c002a35bca4e)) +* **internal:** add ability to regenerate one module's docs ([#3777](https://www.github.com/googleapis/google-cloud-go/issues/3777)) ([dc15995](https://www.github.com/googleapis/google-cloud-go/commit/dc15995521bd065da4cfaae95642588919a8c548)) +* **metastore:** added support for release channels when creating service ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **metastore:** Publish Dataproc Metastore v1alpha API ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **metastore:** start generating apiv1alpha ([#3747](https://www.github.com/googleapis/google-cloud-go/issues/3747)) ([359312a](https://www.github.com/googleapis/google-cloud-go/commit/359312ad6d4f61fb341d41ffa35fc0634979e650)) +* **metastore:** start generating apiv1beta ([#3788](https://www.github.com/googleapis/google-cloud-go/issues/3788)) ([2977095](https://www.github.com/googleapis/google-cloud-go/commit/297709593ad32f234c0fbcfa228cffcfd3e591f4)) +* **secretmanager:** added topic field to Secret ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) + + +### Bug Fixes + +* **analytics/admin:** add `https://www.googleapis.com/auth/analytics.edit` OAuth2 scope to the list of acceptable scopes for all read only methods of the Admin API docs: update the documentation of the `update_mask` field used by Update() methods ([f1323b1](https://www.github.com/googleapis/google-cloud-go/commit/f1323b10a3c7cc1d215730cefd3062064ef54c01)) +* **apigateway:** Provide resource definitions for service management and IAM resources ([18c88c4](https://www.github.com/googleapis/google-cloud-go/commit/18c88c437bd1741eaf5bf5911b9da6f6ea7cd75d)) +* **functions:** Fix service namespace in grpc_service_config. 
([7811a34](https://www.github.com/googleapis/google-cloud-go/commit/7811a34ef64d722480c640810251bb3a0d65d495)) +* **internal/godocfx:** prevent index out of bounds when pkg == mod ([#3768](https://www.github.com/googleapis/google-cloud-go/issues/3768)) ([3d80b4e](https://www.github.com/googleapis/google-cloud-go/commit/3d80b4e93b0f7e857d6e9681d8d6a429750ecf80)) +* **internal/godocfx:** use correct anchor links ([#3738](https://www.github.com/googleapis/google-cloud-go/issues/3738)) ([919039a](https://www.github.com/googleapis/google-cloud-go/commit/919039a01a006c41e720218bd55f83ce98a5edef)) +* **internal:** fix Bash syntax ([#3779](https://www.github.com/googleapis/google-cloud-go/issues/3779)) ([3dd245d](https://www.github.com/googleapis/google-cloud-go/commit/3dd245dbdbfa84f0bbe5a476412d8463fe3e700c)) +* **tables:** use area120tables_v1alpha1.yaml as api-service-config ([#3759](https://www.github.com/googleapis/google-cloud-go/issues/3759)) ([b130ec0](https://www.github.com/googleapis/google-cloud-go/commit/b130ec0aa946b1a1eaa4d5a7c33e72353ac1612e)) + +## [0.78.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.77.0...v0.78.0) (2021-02-22) + + +### Features + +* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([16597fa](https://www.github.com/googleapis/google-cloud-go/commit/16597fa1ce549053c7183e8456e23f554a5501de)) +* **area120/tables:** Added ListWorkspaces, GetWorkspace, BatchDeleteRows APIs. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **dialogflow:** add additional_bindings to Dialogflow v2 ListIntents API docs: update copyrights and session docs ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **documentai:** Update documentai/v1beta3 protos ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **gkehub:** Update Membership API v1beta1 proto ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **servicecontrol:** Update the ruby_cloud_gapic_library rules for the libraries published to google-cloud-ruby to the form that works with build_gen (separate parameters for ruby_cloud_title and ruby_cloud_description). chore: Update Bazel-Ruby rules version. chore: Update build_gen version. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) +* **speech:** Support Model Adaptation. ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) + + +### Bug Fixes + +* **dialogflow/cx:** RunTestCase http template. PHP REST client lib can be generated. feat: Support transition route group coverage for Test Cases. ([613ced7](https://www.github.com/googleapis/google-cloud-go/commit/613ced702bbc82a154a4d3641b483f71c7cd1af4)) +* **errorreporting:** Fixes ruby gem build ([0bd21d7](https://www.github.com/googleapis/google-cloud-go/commit/0bd21d793f75924e5a2d033c58e8aaef89cf8113)) + +## [0.77.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.76.0...v0.77.0) (2021-02-16) + + +### Features + +* **channel:** Add Pub/Sub endpoints for Cloud Channel API. 
([1aea7c8](https://www.github.com/googleapis/google-cloud-go/commit/1aea7c87d39eed87620b488ba0dd60b88ff26c04)) +* **dialogflow/cx:** supports SentimentAnalysisResult in webhook request docs: minor updates in wording ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Make resolution status field available for error groups. Now callers can set the status of an error group by passing this to UpdateGroup. When not specified, it's treated like OPEN. feat: Make source location available for error groups created from GAE. ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) +* **gkehub:** start generating apiv1beta1 ([#3698](https://www.github.com/googleapis/google-cloud-go/issues/3698)) ([8aed3bd](https://www.github.com/googleapis/google-cloud-go/commit/8aed3bd1bbbe983e4891c813e4c5dc9b3aa1b9b2)) +* **internal/docfx:** full cross reference linking ([#3656](https://www.github.com/googleapis/google-cloud-go/issues/3656)) ([fcb7318](https://www.github.com/googleapis/google-cloud-go/commit/fcb7318eb338bf3828ac831ed06ca630e1876418)) +* **memcache:** added ApplySoftwareUpdate API docs: various clarifications, new documentation for ApplySoftwareUpdate chore: update proto annotations ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **networkconnectivity:** Add state field in resources docs: Minor changes ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **networkconnectivity:** Add state field in resources docs: Minor changes ([b4b5898](https://www.github.com/googleapis/google-cloud-go/commit/b4b58987368f80494bbc7f651f50e9123200fb3f)) +* **recommendationengine:** start generating apiv1beta1 ([#3686](https://www.github.com/googleapis/google-cloud-go/issues/3686)) ([8f4e130](https://www.github.com/googleapis/google-cloud-go/commit/8f4e13009444d88a5a56144129f055623a2205ac)) + + +### Bug Fixes + +* **errorreporting:** Remove dependency on AppEngine's proto definitions. This also removes the source_references field. ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **errorreporting:** Update bazel builds for ER client libraries. 
([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **internal/godocfx:** use exact list of top-level decls ([#3665](https://www.github.com/googleapis/google-cloud-go/issues/3665)) ([3cd2961](https://www.github.com/googleapis/google-cloud-go/commit/3cd2961bd7b9c29d82a21ba8850eff00c7c332fd)) +* **kms:** do not retry on 13 INTERNAL ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) +* **orgpolicy:** Fix constraint resource pattern annotation ([f66114b](https://www.github.com/googleapis/google-cloud-go/commit/f66114bc7233ad06e18f38dd39497a74d85fdbd8)) +* **orgpolicy:** Fix constraint resource pattern annotation ([0b4370a](https://www.github.com/googleapis/google-cloud-go/commit/0b4370a0d397913d932dbbdc2046a958dc3b836a)) +* **profiler:** make sure retries use the most up-to-date copy of the trailer ([#3660](https://www.github.com/googleapis/google-cloud-go/issues/3660)) ([3ba9ebc](https://www.github.com/googleapis/google-cloud-go/commit/3ba9ebcee2b8b43cdf2c8f8a3d810516a604b363)) +* **vision:** sync vision v1 protos to get extra FaceAnnotation Landmark Types ([2b4414d](https://www.github.com/googleapis/google-cloud-go/commit/2b4414d973e3445725cd38901bf75340c97fc663)) + +## [0.76.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.75.0...v0.76.0) (2021-02-02) + + +### Features + +* **accessapproval:** Migrate the Bazel rules for the libraries published to google-cloud-ruby to use the gapic-generator-ruby instead of the monolith generator. ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **all:** auto-regenerate gapics ([#3526](https://www.github.com/googleapis/google-cloud-go/issues/3526)) ([ab2af0b](https://www.github.com/googleapis/google-cloud-go/commit/ab2af0b32630dd97f44800f4e273184f887375db)) +* **all:** auto-regenerate gapics ([#3539](https://www.github.com/googleapis/google-cloud-go/issues/3539)) ([84d4d8a](https://www.github.com/googleapis/google-cloud-go/commit/84d4d8ae2d3fbf34a4a312a0a2e4062d18caaa3d)) +* **all:** auto-regenerate gapics ([#3546](https://www.github.com/googleapis/google-cloud-go/issues/3546)) ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) +* **all:** auto-regenerate gapics ([#3563](https://www.github.com/googleapis/google-cloud-go/issues/3563)) ([102112a](https://www.github.com/googleapis/google-cloud-go/commit/102112a4e9285a16645aabc89789f613d4f47c9e)) +* **all:** auto-regenerate gapics ([#3576](https://www.github.com/googleapis/google-cloud-go/issues/3576)) ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **all:** auto-regenerate gapics ([#3580](https://www.github.com/googleapis/google-cloud-go/issues/3580)) ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) +* **all:** auto-regenerate gapics ([#3587](https://www.github.com/googleapis/google-cloud-go/issues/3587)) ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) +* **all:** auto-regenerate gapics ([#3598](https://www.github.com/googleapis/google-cloud-go/issues/3598)) ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **appengine:** start generating apiv1 
([#3561](https://www.github.com/googleapis/google-cloud-go/issues/3561)) ([2b6a3b4](https://www.github.com/googleapis/google-cloud-go/commit/2b6a3b4609e389da418a83eb60a8ae3710d646d7)) +* **assuredworkloads:** updated google.cloud.assuredworkloads.v1beta1.AssuredWorkloadsService service. Clients can now create workloads with US_REGIONAL_ACCESS compliance regime ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **binaryauthorization:** start generating apiv1beta1 ([#3562](https://www.github.com/googleapis/google-cloud-go/issues/3562)) ([56e18a6](https://www.github.com/googleapis/google-cloud-go/commit/56e18a64836ab9482528b212eb139f649f7a35c3)) +* **channel:** Add Pub/Sub endpoints for Cloud Channel API. ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) +* **cloudtasks:** introducing field: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats, Task.pull_message and introducing messages: QueueStats PullMessage docs: updates to max burst size description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **cloudtasks:** introducing fields: ListQueuesRequest.read_mask, GetQueueRequest.read_mask, Queue.task_ttl, Queue.tombstone_ttl, Queue.stats and introducing messages: QueueStats docs: updates to AppEngineHttpRequest description ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **datalabeling:** start generating apiv1beta1 ([#3582](https://www.github.com/googleapis/google-cloud-go/issues/3582)) ([d8a7fee](https://www.github.com/googleapis/google-cloud-go/commit/d8a7feef51d3344fa7e258aba1d9fbdab56dadcf)) +* **dataqna:** start generating apiv1alpha ([#3586](https://www.github.com/googleapis/google-cloud-go/issues/3586)) ([24c5b8f](https://www.github.com/googleapis/google-cloud-go/commit/24c5b8f4f45f8cd8b3001b1ca5a8d80e9f3b39d5)) +* **dialogflow/cx:** Add new Experiment service docs: minor doc update on redact field in intent.proto and page.proto ([0959f27](https://www.github.com/googleapis/google-cloud-go/commit/0959f27e85efe94d39437ceef0ff62ddceb8e7a7)) +* **dialogflow/cx:** added support for test cases and agent validation ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **dialogflow/cx:** added support for test cases and agent validation ([3859a6f](https://www.github.com/googleapis/google-cloud-go/commit/3859a6ffc447e9c0b4ef231e2788fbbcfe48a94f)) +* **dialogflow:** add C++ targets for DialogFlow ([959fde5](https://www.github.com/googleapis/google-cloud-go/commit/959fde5ab12f7aee206dd46022e3cad1bc3470f7)) +* **documentai:** start generating apiv1beta3 ([#3595](https://www.github.com/googleapis/google-cloud-go/issues/3595)) ([5ae21fa](https://www.github.com/googleapis/google-cloud-go/commit/5ae21fa1cfb8b8dacbcd0fc43eee430f7db63102)) +* **domains:** start generating apiv1beta1 ([#3632](https://www.github.com/googleapis/google-cloud-go/issues/3632)) ([b8ada6f](https://www.github.com/googleapis/google-cloud-go/commit/b8ada6f197e680d0bb26aa031e6431bc099a3149)) +* **godocfx:** include alt documentation link ([#3530](https://www.github.com/googleapis/google-cloud-go/issues/3530)) ([806cdd5](https://www.github.com/googleapis/google-cloud-go/commit/806cdd56fb6fdddd7a6c1354e55e0d1259bd6c8b)) +* **internal/gapicgen:** change commit formatting 
to match standard ([#3500](https://www.github.com/googleapis/google-cloud-go/issues/3500)) ([d1e3d46](https://www.github.com/googleapis/google-cloud-go/commit/d1e3d46c47c425581e2b149c07f8e27ffc373c7e)) +* **internal/godocfx:** xref function declarations ([#3615](https://www.github.com/googleapis/google-cloud-go/issues/3615)) ([2bdbb87](https://www.github.com/googleapis/google-cloud-go/commit/2bdbb87a682d799cf5e262a61a3ef1faf41151af)) +* **mediatranslation:** start generating apiv1beta1 ([#3636](https://www.github.com/googleapis/google-cloud-go/issues/3636)) ([4129469](https://www.github.com/googleapis/google-cloud-go/commit/412946966cf7f53c51deff1b1cc1a12d62ed0279)) +* **memcache:** start generating apiv1 ([#3579](https://www.github.com/googleapis/google-cloud-go/issues/3579)) ([eabf7cf](https://www.github.com/googleapis/google-cloud-go/commit/eabf7cfde7b3a3cc1b35c320ba52e07be9926359)) +* **networkconnectivity:** initial generation of apiv1alpha1 ([#3567](https://www.github.com/googleapis/google-cloud-go/issues/3567)) ([adf489a](https://www.github.com/googleapis/google-cloud-go/commit/adf489a536292e3196677621477eae0d52761e7f)) +* **orgpolicy:** start generating apiv2 ([#3652](https://www.github.com/googleapis/google-cloud-go/issues/3652)) ([c103847](https://www.github.com/googleapis/google-cloud-go/commit/c1038475779fda3589aa9659d4ad0b703036b531)) +* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9070c86](https://www.github.com/googleapis/google-cloud-go/commit/9070c86e2c69f9405d42fc0e6fe7afd4a256d8b8)) +* **osconfig/agentendpoint:** add ApplyConfigTask to AgentEndpoint API ([9af529c](https://www.github.com/googleapis/google-cloud-go/commit/9af529c21e98b62c4617f7a7191c307659cf8bb8)) +* **recommender:** add bindings for folder/org type resources for protos in recommendations, insights and recommender_service to enable v1 api for folder/org ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **recommender:** auto generated cl for enabling v1beta1 folder/org APIs and integration test ([7bdebad](https://www.github.com/googleapis/google-cloud-go/commit/7bdebadbe06774c94ab745dfef4ce58ce40a5582)) +* **resourcemanager:** start generating apiv2 ([#3575](https://www.github.com/googleapis/google-cloud-go/issues/3575)) ([93d0ebc](https://www.github.com/googleapis/google-cloud-go/commit/93d0ebceb4270351518a13958005bb68f0cace60)) +* **secretmanager:** added expire_time and ttl fields to Secret ([9974a80](https://www.github.com/googleapis/google-cloud-go/commit/9974a8017b5de8129a586f2404a23396caea0ee1)) +* **secretmanager:** added expire_time and ttl fields to Secret ([ac22beb](https://www.github.com/googleapis/google-cloud-go/commit/ac22beb9b90771b24c8b35db7587ad3f5c0a970e)) +* **servicecontrol:** start generating apiv1 ([#3644](https://www.github.com/googleapis/google-cloud-go/issues/3644)) ([f84938b](https://www.github.com/googleapis/google-cloud-go/commit/f84938bb4042a5629fd66bda42de028fd833648a)) +* **servicemanagement:** start generating apiv1 ([#3614](https://www.github.com/googleapis/google-cloud-go/issues/3614)) ([b96134f](https://www.github.com/googleapis/google-cloud-go/commit/b96134fe91c182237359000cd544af5fec60d7db)) + + +### Bug Fixes + +* **datacatalog:** Update PHP package name casing to match the PHP namespace in the proto files ([c7ecf0f](https://www.github.com/googleapis/google-cloud-go/commit/c7ecf0f3f454606b124e52d20af2545b2c68646f)) +* **internal/godocfx:** add TOC element for module root package 
([#3599](https://www.github.com/googleapis/google-cloud-go/issues/3599)) ([1d6eb23](https://www.github.com/googleapis/google-cloud-go/commit/1d6eb238206fcf8815d88981527ef176851afd7a)) +* **profiler:** Force gax to retry in case of certificate errors ([#3178](https://www.github.com/googleapis/google-cloud-go/issues/3178)) ([35dcd72](https://www.github.com/googleapis/google-cloud-go/commit/35dcd725dcd03266ed7439de40c277376b38cd71)) + +## [0.75.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.74.0...v0.75.0) (2021-01-11) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3514](https://www.github.com/googleapis/google-cloud-go/issues/3514) [#3501](https://www.github.com/googleapis/google-cloud-go/issues/3501) [#3497](https://www.github.com/googleapis/google-cloud-go/issues/3497) [#3455](https://www.github.com/googleapis/google-cloud-go/issues/3455) [#3448](https://www.github.com/googleapis/google-cloud-go/issues/3448) +* **channel:** start generating apiv1 ([#3517](https://www.github.com/googleapis/google-cloud-go/issues/3517)) ([2cf3b3c](https://www.github.com/googleapis/google-cloud-go/commit/2cf3b3cf7d99f2efd6868a710fad9e935fc87965)) + + +### Bug Fixes + +* **internal/gapicgen:** don't regen files that have been deleted ([#3471](https://www.github.com/googleapis/google-cloud-go/issues/3471)) ([112ca94](https://www.github.com/googleapis/google-cloud-go/commit/112ca9416cc8a2502b32547dc8d789655452f84a)) + ## [0.74.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.73.0...v0.74.0) (2020-12-10) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 1ffe670..ba024f5 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -1,6 +1,6 @@ # Google Cloud Client Libraries for Go -[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go) +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go.svg)](https://pkg.go.dev/cloud.google.com/go) Go packages for [Google Cloud Platform](https://cloud.google.com) services. @@ -25,8 +25,6 @@ To install the packages on your system, *do not clone the repo*. Instead: **NOTE:** Some of these packages are under development, and may occasionally make backwards-incompatible changes. -**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). - ## Supported APIs | Google API | Status | Package | @@ -56,6 +54,7 @@ make backwards-incompatible changes. 
 | [Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) |
 | [OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) |
 | [Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) |
+| [Pub/Sub Lite][cloud-pubsublite] | beta | [`cloud.google.com/go/pubsublite`](https://pkg.go.dev/cloud.google.com/go/pubsublite) |
 | [Phishing Protection][cloud-phishingprotection] | alpha | [`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) |
 | [reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) |
 | [Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) |
@@ -152,6 +151,7 @@ for more information.
 [cloud-irm]: https://cloud.google.com/incident-response/docs/concepts
 [cloud-kms]: https://cloud.google.com/kms/
 [cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsublite]: https://cloud.google.com/pubsub/lite
 [cloud-storage]: https://cloud.google.com/storage/
 [cloud-language]: https://cloud.google.com/natural-language
 [cloud-logging]: https://cloud.google.com/logging/
diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go
index 0130d74..b667cc8 100644
--- a/vendor/cloud.google.com/go/doc.go
+++ b/vendor/cloud.google.com/go/doc.go
@@ -85,6 +85,20 @@ https://godoc.org/google.golang.org/grpc/grpclog for more information.
 For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or
 "http2debug=2".
 
+Inspecting errors
+
+Most of the errors returned by the generated clients can be converted into a
+`grpc.Status`. Converting your errors to this type can be a useful way to get
+more information about what went wrong while debugging.
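For reference, the pattern this new doc section describes can be exercised as
a complete program. The sketch below is illustrative rather than part of the
patch: it relies only on the public google.golang.org/grpc/status and
google.golang.org/grpc/codes packages, and callAPI is a hypothetical stand-in
for a method call on one of the generated gRPC clients.

	package main

	import (
		"log"

		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
	)

	// callAPI is a hypothetical stand-in for a generated-client call that
	// fails with a typical gRPC status error.
	func callAPI() error {
		return status.Error(codes.PermissionDenied, "caller lacks storage.buckets.get")
	}

	func main() {
		if err := callAPI(); err != nil {
			// FromError recovers the *status.Status carried by gRPC errors;
			// ok is false for errors that do not carry a gRPC status.
			if s, ok := status.FromError(err); ok {
				log.Println(s.Code(), s.Message())
				for _, d := range s.Proto().Details {
					log.Println(d)
				}
			}
		}
	}

The hunk continues below with the abbreviated form of the same check that the
doc comment itself adds.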
+	if err != nil {
+		if s, ok := status.FromError(err); ok {
+			log.Println(s.Message())
+			for _, d := range s.Proto().Details {
+				log.Println(d)
+			}
+		}
+	}
+
 Client Stability
 
 Clients in this repository are considered alpha or beta unless otherwise
diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod
index e4e2434..4fa03ca 100644
--- a/vendor/cloud.google.com/go/go.mod
+++ b/vendor/cloud.google.com/go/go.mod
@@ -4,21 +4,20 @@ go 1.11
 
 require (
 	cloud.google.com/go/storage v1.10.0
-	github.com/golang/mock v1.4.4
-	github.com/golang/protobuf v1.4.3
-	github.com/google/go-cmp v0.5.4
+	github.com/golang/mock v1.5.0
+	github.com/golang/protobuf v1.5.1
+	github.com/google/go-cmp v0.5.5
 	github.com/google/martian/v3 v3.1.0
-	github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2
+	github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5
 	github.com/googleapis/gax-go/v2 v2.0.5
 	github.com/jstemmer/go-junit-report v0.9.1
-	go.opencensus.io v0.22.5
+	go.opencensus.io v0.23.0
 	golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5
-	golang.org/x/mod v0.4.0 // indirect
-	golang.org/x/net v0.0.0-20201209123823-ac852fbbde11
-	golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5
-	golang.org/x/text v0.3.4
-	golang.org/x/tools v0.0.0-20201208233053-a543418bbed2
-	google.golang.org/api v0.36.0
-	google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc
-	google.golang.org/grpc v1.34.0
+	golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4
+	golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84
+	golang.org/x/text v0.3.5
+	golang.org/x/tools v0.1.0
+	google.golang.org/api v0.43.0
+	google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1
+	google.golang.org/grpc v1.36.1
 )
diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum
index 478b166..d0209b2 100644
--- a/vendor/cloud.google.com/go/go.sum
+++ b/vendor/cloud.google.com/go/go.sum
@@ -14,84 +14,65 @@ cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZ
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
-cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
-cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
-cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
-cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
-cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/datastore v1.0.0
h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0 h1:86K1Gel7BQ9/WmNWn7dTKMvTLFzwtBe5FNqYbi9X35g= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= @@ -99,75 +80,59 @@ github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1 h1:jAbXjIeW2ZSW2AwFxlGTDoc2CjI2XujLkV3ArsZFCvc= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0 
h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2 h1:HyOHhUtuB/Ruw/L5s5pG2D0kckkN2/IzBs9OClGHnHI= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5 h1:zIaiqGYDQwa4HVx5wGRTXbx38Pqxjemn4BP98wpzpXo= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report 
v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -175,56 +140,45 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f h1:hX65Cu3JDlGH3uEdK7I99Ii+9kjD6mvnnpfLdEAH0x4= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -232,27 +186,22 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0 h1:8pl+sMODzuvGJkmj2W4kZihvVb5mKm8pB/X44PIQHv8= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -263,62 +212,52 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11 h1:lwlPPsmjDKK0J6eG6xDWd5XPehI0R024zxjDnw3esPA= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4 h1:b0LrWgu8+q7z4J+0Y3Umo5q1dL7NXBkKBWkaVkAq17E= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 h1:Mj83v+wSRNEar42a/MQgxk9X42TdEmrOl9i+y8WbxLo= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 h1:Lm4OryKCca1vehdsWogr9N4t7NfZxLbJoc/H0w4K4S4= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84 h1:duBc5zuJsmJXYOVVE/6PxejI+N3AaCqKjtsoLn1Je5Q= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -328,62 
+267,51 @@ golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25 h1:OKbAoGs4fGM5cPLlVQLZGYkFC8OnOfgo6tt0Smf9XhM= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3 h1:kzM6+9dur93BcC2kVlYl34cHU+TYZLanmpSJHVMmL64= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= golang.org/x/tools 
v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -404,80 +332,62 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d h1:szSOL78iTCl0LF1AMjhSWJj8tIM0KixlUUnBtYXsmd8= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2 h1:vEtypaVub6UvKkiXZ2xx9QIvp9TL7sI7xp7vdi2kezA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0 h1:4sAyIHT6ZohtAQDoxws+ez7bROYmUlOVvsUscYCDTqA= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -492,83 +402,69 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIoU2uUvrMVl+H26359loFFUleSMXFo= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRmvrIwC/ZHH7tNt4tUYIu8+se2aayY= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc h1:BgQmMjmd7K1zov8j8lYULHW0WnmBGUIMp6+VDwlGErc= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1 h1:E7wSQBXkH3T3diucK+9Z1kjn4+/9tNG7lZLr75oOhh8= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0 h1:raiipEjMOIC/TO2AvyTxP25XFdLxNIBwzDh3FM3XztI= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
+google.golang.org/grpc v1.36.1 h1:cmUfbeGKnz9+2DD/UYsMQXeqbHZqZDs4eQwW0sFOpBY= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a h1:/8zB6iBfHCl1qAnEAWwGPNrUvapuy6CPla1VM0k8hQw= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 
h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
-rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
-rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/cloud.google.com/go/iam/credentials/apiv1/doc.go b/vendor/cloud.google.com/go/iam/credentials/apiv1/doc.go
index 4d1447d..a271d6b 100644
--- a/vendor/cloud.google.com/go/iam/credentials/apiv1/doc.go
+++ b/vendor/cloud.google.com/go/iam/credentials/apiv1/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -49,7 +49,7 @@ import (
 type clientHookParams struct{}
 type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
 
-const versionClient = "20201210"
+const versionClient = "20210402"
 
 func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
 	out, _ := metadata.FromOutgoingContext(ctx)
diff --git a/vendor/cloud.google.com/go/iam/credentials/apiv1/gapic_metadata.json b/vendor/cloud.google.com/go/iam/credentials/apiv1/gapic_metadata.json
new file mode 100644
index 0000000..7e1edb5
--- /dev/null
+++ b/vendor/cloud.google.com/go/iam/credentials/apiv1/gapic_metadata.json
@@ -0,0 +1,38 @@
+{
+  "schema": "1.0",
+  "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
+  "language": "go",
+  "protoPackage": "google.iam.credentials.v1",
+  "libraryPackage": "cloud.google.com/go/iam/credentials/apiv1",
+  "services": {
+    "IAMCredentials": {
+      "clients": {
+        "grpc": {
+          "libraryClient": "IamCredentialsClient",
+          "rpcs": {
+            "GenerateAccessToken": {
+              "methods": [
+                "GenerateAccessToken"
+              ]
+            },
+            "GenerateIdToken": {
+              "methods": [
+                "GenerateIdToken"
+              ]
+            },
+            "SignBlob": {
+              "methods": [
+                "SignBlob"
+              ]
+            },
+            "SignJwt": {
+              "methods": [
+                "SignJwt"
+              ]
+            }
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/vendor/cloud.google.com/go/iam/credentials/apiv1/iam_credentials_client.go b/vendor/cloud.google.com/go/iam/credentials/apiv1/iam_credentials_client.go
index 28672fb..1ccaf05 100644
--- a/vendor/cloud.google.com/go/iam/credentials/apiv1/iam_credentials_client.go
+++ b/vendor/cloud.google.com/go/iam/credentials/apiv1/iam_credentials_client.go
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 4870766..ecb5f8e 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -1,15 +1,15 @@
 {
   "cloud.google.com/go/accessapproval/apiv1": {
     "distribution_name": "cloud.google.com/go/accessapproval/apiv1",
-    "description": "",
+    "description": "Access Approval API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/accessapproval/apiv1",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/analytics/admin/apiv1alpha": {
     "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha",
-    "description": "",
+    "description": "Google Analytics Admin API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/admin/apiv1alpha",
@@ -17,15 +17,31 @@
   },
   "cloud.google.com/go/analytics/data/apiv1alpha": {
     "distribution_name": "cloud.google.com/go/analytics/data/apiv1alpha",
-    "description": "",
+    "description": "Google Analytics Data API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/analytics/data/apiv1alpha",
     "release_level": "alpha"
   },
+  "cloud.google.com/go/apigateway/apiv1": {
+    "distribution_name": "cloud.google.com/go/apigateway/apiv1",
+    "description": "API Gateway API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/apigateway/apiv1",
+    "release_level": "ga"
+  },
+  "cloud.google.com/go/appengine/apiv1": {
+    "distribution_name": "cloud.google.com/go/appengine/apiv1",
+    "description": "App Engine Admin API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/appengine/apiv1",
+    "release_level": "ga"
+  },
   "cloud.google.com/go/area120/tables/apiv1alpha1": {
     "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1",
-    "description": "",
+    "description": "Area120 Tables API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/area120/tables/apiv1alpha1",
@@ -37,7 +53,7 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/artifactregistry/apiv1beta2",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/asset/apiv1": {
     "distribution_name": "cloud.google.com/go/asset/apiv1",
@@ -47,14 +63,6 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1",
     "release_level": "ga"
   },
-  "cloud.google.com/go/asset/apiv1beta1": {
-    "distribution_name": "cloud.google.com/go/asset/apiv1beta1",
-    "description": "Cloud Asset API",
-    "language": "Go",
-    "client_library_type": "generated",
-    "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1beta1",
-    "release_level": "beta"
-  },
   "cloud.google.com/go/asset/apiv1p2beta1": {
     "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1",
     "description": "Cloud Asset API",
@@ -73,7 +81,7 @@
   },
   "cloud.google.com/go/assuredworkloads/apiv1beta1": {
     "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1",
-    "description": "",
+    "description": "Assured Workloads API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/assuredworkloads/apiv1beta1",
@@ -197,16 +205,32 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/billing/budgets/apiv1beta1": {
     "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1",
-    "description": "",
+    "description": "Cloud Billing Budget API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/binaryauthorization/apiv1beta1": {
+    "distribution_name": "cloud.google.com/go/binaryauthorization/apiv1beta1",
+    "description": "Binary Authorization API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/binaryauthorization/apiv1beta1",
+    "release_level": "beta"
+  },
+  "cloud.google.com/go/channel/apiv1": {
+    "distribution_name": "cloud.google.com/go/channel/apiv1",
+    "description": "Cloud Channel API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/channel/apiv1",
+    "release_level": "ga"
+  },
   "cloud.google.com/go/cloudbuild/apiv1/v2": {
     "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2",
     "description": "Cloud Build API",
@@ -271,6 +295,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/datalabeling/apiv1beta1": {
+    "distribution_name": "cloud.google.com/go/datalabeling/apiv1beta1",
+    "description": "Data Labeling API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/datalabeling/apiv1beta1",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/dataproc/apiv1": {
     "distribution_name": "cloud.google.com/go/dataproc/apiv1",
     "description": "Cloud Dataproc API",
@@ -287,6 +319,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1beta2",
     "release_level": "beta"
   },
+  "cloud.google.com/go/dataqna/apiv1alpha": {
+    "distribution_name": "cloud.google.com/go/dataqna/apiv1alpha",
+    "description": "Data QnA API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataqna/apiv1alpha",
+    "release_level": "alpha"
+  },
   "cloud.google.com/go/datastore": {
     "distribution_name": "cloud.google.com/go/datastore",
     "description": "Cloud Datastore",
@@ -319,6 +359,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2",
     "release_level": "ga"
   },
+  "cloud.google.com/go/dialogflow/cx/apiv3": {
+    "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3",
+    "description": "Dialogflow API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/cx/apiv3",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/dialogflow/cx/apiv3beta1": {
     "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1",
     "description": "Dialogflow API",
@@ -335,6 +383,30 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/dlp/apiv2",
     "release_level": "ga"
   },
+  "cloud.google.com/go/documentai/apiv1": {
+    "distribution_name": "cloud.google.com/go/documentai/apiv1",
+    "description": "Cloud Document AI API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1",
+    "release_level": "beta"
+  },
+  "cloud.google.com/go/documentai/apiv1beta3": {
+    "distribution_name": "cloud.google.com/go/documentai/apiv1beta3",
+    "description": "Cloud Document AI API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/documentai/apiv1beta3",
+    "release_level": "beta"
+  },
+  "cloud.google.com/go/domains/apiv1beta1": {
+    "distribution_name": "cloud.google.com/go/domains/apiv1beta1",
+    "description": "Cloud Domains API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/domains/apiv1beta1",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/errorreporting": {
     "distribution_name": "cloud.google.com/go/errorreporting",
     "description": "Cloud Error Reporting API",
@@ -345,7 +417,7 @@
   },
   "cloud.google.com/go/errorreporting/apiv1beta1": {
     "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1",
-    "description": "Cloud Error Reporting API",
+    "description": "Error Reporting API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting/apiv1beta1",
@@ -377,7 +449,7 @@
   },
   "cloud.google.com/go/functions/apiv1": {
     "distribution_name": "cloud.google.com/go/functions/apiv1",
-    "description": "",
+    "description": "Cloud Functions API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/functions/apiv1",
@@ -385,7 +457,7 @@
   },
   "cloud.google.com/go/gaming/apiv1": {
     "distribution_name": "cloud.google.com/go/gaming/apiv1",
-    "description": "",
+    "description": "Game Services API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1",
@@ -393,12 +465,20 @@
   },
   "cloud.google.com/go/gaming/apiv1beta": {
     "distribution_name": "cloud.google.com/go/gaming/apiv1beta",
-    "description": "",
+    "description": "Game Services API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta",
     "release_level": "beta"
   },
+  "cloud.google.com/go/gkehub/apiv1beta1": {
+    "distribution_name": "cloud.google.com/go/gkehub/apiv1beta1",
+    "description": "GKE Hub",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/gkehub/apiv1beta1",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/iam": {
     "distribution_name": "cloud.google.com/go/iam",
     "description": "Cloud IAM",
@@ -477,8 +557,24 @@
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/managedidentities/apiv1",
+    "release_level": "ga"
+  },
+  "cloud.google.com/go/mediatranslation/apiv1beta1": {
+    "distribution_name": "cloud.google.com/go/mediatranslation/apiv1beta1",
+    "description": "Media Translation API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/mediatranslation/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/memcache/apiv1": {
+    "distribution_name": "cloud.google.com/go/memcache/apiv1",
+    "description": "Cloud Memorystore for Memcached API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1",
+    "release_level": "ga"
+  },
   "cloud.google.com/go/memcache/apiv1beta2": {
     "distribution_name": "cloud.google.com/go/memcache/apiv1beta2",
     "description": "Cloud Memorystore for Memcached API",
@@ -487,6 +583,22 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1beta2",
     "release_level": "beta"
   },
+  "cloud.google.com/go/metastore/apiv1alpha": {
+    "distribution_name": "cloud.google.com/go/metastore/apiv1alpha",
+    "description": "Dataproc Metastore API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/metastore/apiv1alpha",
+    "release_level": "alpha"
+  },
+  "cloud.google.com/go/metastore/apiv1beta": {
+    "distribution_name": "cloud.google.com/go/metastore/apiv1beta",
+    "description": "Dataproc Metastore API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/metastore/apiv1beta",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/monitoring/apiv3/v2": {
     "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2",
     "description": "Cloud Monitoring API",
@@ -497,12 +609,20 @@
   },
   "cloud.google.com/go/monitoring/dashboard/apiv1": {
     "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1",
-    "description": "",
+    "description": "Cloud Monitoring API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1",
     "release_level": "ga"
   },
+  "cloud.google.com/go/networkconnectivity/apiv1alpha1": {
+    "distribution_name": "cloud.google.com/go/networkconnectivity/apiv1alpha1",
+    "description": "Network Connectivity API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/networkconnectivity/apiv1alpha1",
+    "release_level": "alpha"
+  },
   "cloud.google.com/go/notebooks/apiv1beta1": {
     "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1",
     "description": "Notebooks API",
@@ -511,6 +631,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/notebooks/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/orgpolicy/apiv2": {
+    "distribution_name": "cloud.google.com/go/orgpolicy/apiv2",
+    "description": "Organization Policy API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/orgpolicy/apiv2",
+    "release_level": "ga"
+  },
   "cloud.google.com/go/osconfig/agentendpoint/apiv1": {
     "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1",
     "description": "OS Config API",
@@ -601,7 +729,7 @@
   },
   "cloud.google.com/go/pubsublite/apiv1": {
     "distribution_name": "cloud.google.com/go/pubsublite/apiv1",
-    "description": "",
+    "description": "Pub/Sub Lite API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsublite/apiv1",
@@ -623,6 +751,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/recommendationengine/apiv1beta1": {
+    "distribution_name": "cloud.google.com/go/recommendationengine/apiv1beta1",
+    "description": "Recommendations AI",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommendationengine/apiv1beta1",
+    "release_level": "beta"
+  },
   "cloud.google.com/go/recommender/apiv1": {
     "distribution_name": "cloud.google.com/go/recommender/apiv1",
     "description": "Recommender API",
@@ -655,6 +791,30 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/resourcemanager/apiv2": {
+    "distribution_name": "cloud.google.com/go/resourcemanager/apiv2",
+    "description": "Cloud Resource Manager API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcemanager/apiv2",
+    "release_level": "ga"
+  },
+  "cloud.google.com/go/resourcesettings/apiv1": {
+    "distribution_name": "cloud.google.com/go/resourcesettings/apiv1",
+    "description": "Resource Settings API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/resourcesettings/apiv1",
+    "release_level": "beta"
+  },
+  "cloud.google.com/go/retail/apiv2": {
+    "distribution_name": "cloud.google.com/go/retail/apiv2",
+    "description": "Retail API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/retail/apiv2",
+    "release_level": "ga"
+  },
   "cloud.google.com/go/rpcreplay": {
     "distribution_name": "cloud.google.com/go/rpcreplay",
     "description": "RPC Replay",
@@ -697,7 +857,7 @@
   },
   "cloud.google.com/go/security/privateca/apiv1beta1": {
     "distribution_name": "cloud.google.com/go/security/privateca/apiv1beta1",
-    "description": "",
+    "description": "Certificate Authority API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/security/privateca/apiv1beta1",
@@ -735,13 +895,21 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/settings/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/servicecontrol/apiv1": {
+    "distribution_name": "cloud.google.com/go/servicecontrol/apiv1",
+    "description": "Service Control API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicecontrol/apiv1",
+    "release_level": "ga"
+  },
   "cloud.google.com/go/servicedirectory/apiv1": {
     "distribution_name": "cloud.google.com/go/servicedirectory/apiv1",
     "description": "Service Directory API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1",
-    "release_level": "beta"
+    "release_level": "ga"
   },
   "cloud.google.com/go/servicedirectory/apiv1beta1": {
     "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1",
@@ -751,6 +919,14 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1beta1",
     "release_level": "beta"
   },
+  "cloud.google.com/go/servicemanagement/apiv1": {
+    "distribution_name": "cloud.google.com/go/servicemanagement/apiv1",
+    "description": "Service Management API",
+    "language": "Go",
+    "client_library_type": "generated",
+    "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicemanagement/apiv1",
+    "release_level": "ga"
+  },
   "cloud.google.com/go/spanner": {
     "distribution_name": "cloud.google.com/go/spanner",
     "description": "Cloud Spanner",
@@ -809,7 +985,7 @@
   },
   "cloud.google.com/go/talent/apiv4": {
     "distribution_name": "cloud.google.com/go/talent/apiv4",
-    "description": "",
+    "description": "Cloud Talent Solution API",
     "language": "Go",
     "client_library_type": "generated",
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4",
@@ -831,14 +1007,6 @@
     "docs_url": "https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1",
     "release_level": "ga"
   },
-  "cloud.google.com/go/trace": {
-    "distribution_name": "cloud.google.com/go/trace",
-    "description": "Stackdriver Trace",
-    "language": "Go",
-    "client_library_type": "manual",
-    "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace",
-    "release_level": "ga"
-  },
   "cloud.google.com/go/trace/apiv1": {
     "distribution_name": "cloud.google.com/go/trace/apiv1",
     "description": "Stackdriver Trace API",
@@ -865,7 +1033,7 @@
   },
"cloud.google.com/go/video/transcoder/apiv1beta1": { "distribution_name": "cloud.google.com/go/video/transcoder/apiv1beta1", - "description": "", + "description": "Transcoder API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/video/transcoder/apiv1beta1", @@ -925,11 +1093,11 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/websecurityscanner/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/workflows/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/apiv1beta", - "description": "", + "description": "Workflows API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/apiv1beta", @@ -937,7 +1105,7 @@ }, "cloud.google.com/go/workflows/executions/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/executions/apiv1beta", - "description": "", + "description": "Workflow Executions API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/workflows/executions/apiv1beta", diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index f6d57be..4c122dc 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,5 +1,33 @@ # Changes +## v1.14.0 + +- Updates to various dependencies. + +## [1.13.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.12.0...v1.13.0) (2021-02-03) + + +### Features + +* **storage:** add missing StorageClass in BucketAttrsToUpdate ([#3038](https://www.github.com/googleapis/google-cloud-go/issues/3038)) ([2fa1b72](https://www.github.com/googleapis/google-cloud-go/commit/2fa1b727f8a7b20aa62fe0990530744f6c109be0)) +* **storage:** add projection parameter for BucketHandle.Objects() ([#3549](https://www.github.com/googleapis/google-cloud-go/issues/3549)) ([9b9c3dc](https://www.github.com/googleapis/google-cloud-go/commit/9b9c3dce3ee10af5b6c4d070821bf47a861efd5b)) + + +### Bug Fixes + +* **storage:** fix endpoint selection logic ([#3172](https://www.github.com/googleapis/google-cloud-go/issues/3172)) ([99edf0d](https://www.github.com/googleapis/google-cloud-go/commit/99edf0d211a9e617f2586fbc83b6f9630da3c537)) + +## v1.12.0 +- V4 signed URL fixes: + - Fix encoding of spaces in query parameters. + - Add fields that were missing from PostPolicyV4 policy conditions. +- Fix Query to correctly list prefixes as well as objects when SetAttrSelection + is used. + +## v1.11.0 +- Add support for CustomTime and NoncurrentTime object lifecycle management + features. + ## v1.10.0 - Bump dependency on google.golang.org/api to capture changes to retry logic which will make retries on writes more resilient. 
diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md index a2253c4..13c89ea 100644 --- a/vendor/cloud.google.com/go/storage/README.md +++ b/vendor/cloud.google.com/go/storage/README.md @@ -1,8 +1,8 @@ -## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) +## Cloud Storage [![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/storage.svg)](https://pkg.go.dev/cloud.google.com/go/storage) - [About Cloud Storage](https://cloud.google.com/storage/) - [API documentation](https://cloud.google.com/storage/docs) -- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) +- [Go client documentation](https://pkg.go.dev/cloud.google.com/go/storage) - [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) ### Example Usage @@ -29,4 +29,4 @@ body, err := ioutil.ReadAll(rc) if err != nil { log.Fatal(err) } -``` \ No newline at end of file +``` diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 4784826..7b1757b 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -134,7 +134,7 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle { // // name must consist entirely of valid UTF-8-encoded runes. The full specification // for valid object names can be found at: -// https://cloud.google.com/storage/docs/bucket-naming +// https://cloud.google.com/storage/docs/naming-objects func (b *BucketHandle) Object(name string) *ObjectHandle { return &ObjectHandle{ c: b.c, @@ -389,7 +389,8 @@ type RetentionPolicy struct { } const ( - // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. + // RFC3339 timestamp with only the date segment, used for CreatedBefore, + // CustomTimeBefore, and NoncurrentTimeBefore in LifecycleRule. rfc3339Date = "2006-01-02" // DeleteAction is a lifecycle action that deletes a live and/or archived @@ -455,6 +456,21 @@ type LifecycleCondition struct { // the specified date in UTC. CreatedBefore time.Time + // CustomTimeBefore is the CustomTime metadata field of the object. This + // condition is satisfied when an object's CustomTime timestamp is before + // midnight of the specified date in UTC. + // + // This condition can only be satisfied if CustomTime has been set. + CustomTimeBefore time.Time + + // DaysSinceCustomTime is the days elapsed since the CustomTime date of the + // object. This condition can only be satisfied if CustomTime has been set. + DaysSinceCustomTime int64 + + // DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp + // of the object. This condition is relevant only for versioned objects. + DaysSinceNoncurrentTime int64 + // Liveness specifies the object's liveness. Relevant only for versioned objects Liveness Liveness @@ -464,6 +480,13 @@ type LifecycleCondition struct { // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". MatchesStorageClasses []string + // NoncurrentTimeBefore is the noncurrent timestamp of the object. This + // condition is satisfied when an object's noncurrent timestamp is before + // midnight of the specified date in UTC. + // + // This condition is relevant only for versioned objects. + NoncurrentTimeBefore time.Time + // NumNewerVersions is the condition matching objects with a number of newer versions. 
// // If the value is N, this condition is satisfied when there are at least N @@ -638,6 +661,14 @@ type BucketAttrsToUpdate struct { // for more information. UniformBucketLevelAccess *UniformBucketLevelAccess + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "STANDARD", "NEARLINE", + // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. + StorageClass string + // If set, updates the retention policy of the bucket. Using // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. // @@ -778,6 +809,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { rb.DefaultObjectAcl = nil rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") } + rb.StorageClass = ua.StorageClass if ua.setLabels != nil || ua.deleteLabels != nil { rb.Labels = map[string]string{} for k, v := range ua.setLabels { @@ -946,9 +978,11 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { StorageClass: r.Action.StorageClass, }, Condition: &raw.BucketLifecycleRuleCondition{ - Age: r.Condition.AgeInDays, - MatchesStorageClass: r.Condition.MatchesStorageClasses, - NumNewerVersions: r.Condition.NumNewerVersions, + Age: r.Condition.AgeInDays, + DaysSinceCustomTime: r.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime, + MatchesStorageClass: r.Condition.MatchesStorageClasses, + NumNewerVersions: r.Condition.NumNewerVersions, }, } @@ -964,6 +998,12 @@ func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { if !r.Condition.CreatedBefore.IsZero() { rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date) } + if !r.Condition.CustomTimeBefore.IsZero() { + rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date) + } + if !r.Condition.NoncurrentTimeBefore.IsZero() { + rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date) + } rl.Rule = append(rl.Rule, rr) } return &rl @@ -981,9 +1021,11 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { StorageClass: rr.Action.StorageClass, }, Condition: LifecycleCondition{ - AgeInDays: rr.Condition.Age, - MatchesStorageClasses: rr.Condition.MatchesStorageClass, - NumNewerVersions: rr.Condition.NumNewerVersions, + AgeInDays: rr.Condition.Age, + DaysSinceCustomTime: rr.Condition.DaysSinceCustomTime, + DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime, + MatchesStorageClasses: rr.Condition.MatchesStorageClass, + NumNewerVersions: rr.Condition.NumNewerVersions, }, } @@ -998,6 +1040,12 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { if rr.Condition.CreatedBefore != "" { r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore) } + if rr.Condition.CustomTimeBefore != "" { + r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore) + } + if rr.Condition.NoncurrentTimeBefore != "" { + r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore) + } l.Rules = append(l.Rules, r) } return l @@ -1091,8 +1139,9 @@ func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLeve } } -// Objects returns an iterator over the objects in the bucket that match the Query q. -// If q is nil, no filtering is done. +// Objects returns an iterator over the objects in the bucket that match the +// Query q. 
If q is nil, no filtering is done. Objects will be iterated over +// lexicographically by name. // // Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { @@ -1131,6 +1180,13 @@ func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. // +// In addition, if Next returns an error other than iterator.Done, all +// subsequent calls will return the same error. To continue iteration, a new +// `ObjectIterator` must be created. Since objects are ordered lexicographically +// by name, `Query.StartOffset` can be used to create a new iterator which will +// start at the desired place. See +// https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects. +// // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will // have a non-empty Prefix field, and a zero value for all other fields. These // represent prefixes. @@ -1148,9 +1204,15 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) { func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { req := it.bucket.c.raw.Objects.List(it.bucket.name) setClientHeader(req.Header()) - req.Projection("full") + projection := it.query.Projection + if projection == ProjectionDefault { + projection = ProjectionFull + } + req.Projection(projection.String()) req.Delimiter(it.query.Delimiter) req.Prefix(it.query.Prefix) + req.StartOffset(it.query.StartOffset) + req.EndOffset(it.query.EndOffset) req.Versions(it.query.Versions) if len(it.query.fieldSelection) > 0 { req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 614ea11..750e183 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -39,7 +39,9 @@ To start working with this package, create a client: // TODO: Handle error. } -The client will use your default application credentials. +The client will use your default application credentials. Clients should be +reused instead of created as needed. The methods of Client are safe for +concurrent use by multiple goroutines. If you only wish to access public data, you can create an unauthenticated client with @@ -136,6 +138,17 @@ Listing objects in a bucket is done with the Bucket.Objects method: names = append(names, attrs.Name) } +Objects are listed lexicographically by name. To filter objects +lexicographically, Query.StartOffset and/or Query.EndOffset can be used: + + query := &storage.Query{ + Prefix: "", + StartOffset: "bar/", // Only list objects lexicographically >= "bar/" + EndOffset: "foo/", // Only list objects lexicographically < "foo/" + } + + // ... 
as before + If only a subset of object attributes is needed when listing, specifying this subset using Query.SetAttrSelection may speed up the listing process: diff --git a/vendor/cloud.google.com/go/storage/go.mod b/vendor/cloud.google.com/go/storage/go.mod index 2eb6df3..3178878 100644 --- a/vendor/cloud.google.com/go/storage/go.mod +++ b/vendor/cloud.google.com/go/storage/go.mod @@ -3,16 +3,15 @@ module cloud.google.com/go/storage go 1.11 require ( - cloud.google.com/go v0.57.0 - cloud.google.com/go/bigquery v1.8.0 // indirect - github.com/golang/protobuf v1.4.2 - github.com/google/go-cmp v0.4.1 + cloud.google.com/go v0.75.0 + github.com/golang/protobuf v1.4.3 + github.com/google/go-cmp v0.5.4 github.com/googleapis/gax-go/v2 v2.0.5 - golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 // indirect - golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d - golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect - golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 // indirect - google.golang.org/api v0.28.0 - google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 - google.golang.org/grpc v1.29.1 + golang.org/x/mod v0.4.1 // indirect + golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99 + golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 // indirect + golang.org/x/tools v0.1.0 // indirect + google.golang.org/api v0.40.0 + google.golang.org/genproto v0.0.0-20210226172003-ab064af71705 + google.golang.org/grpc v1.35.0 ) diff --git a/vendor/cloud.google.com/go/storage/go.sum b/vendor/cloud.google.com/go/storage/go.sum index 5d3fca5..5f4f16c 100644 --- a/vendor/cloud.google.com/go/storage/go.sum +++ b/vendor/cloud.google.com/go/storage/go.sum @@ -4,49 +4,37 @@ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSR cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0 h1:GGslhk/BU052LPlnI1vpp3fcbUs+hQ3E+Doti/3/vF8= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0 
h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0 h1:K2NyuHRuv15ku6eUpe0DQk5ZykPMnSOnvuVf6IHcjaE= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -55,18 +43,20 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= 
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -76,53 +66,58 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -135,33 +130,32 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a h1:7Wlg8L54In96HTWOaI4sreLJ6qfyGuvSau5el3fK41Y= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -170,26 +164,24 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint 
v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1 h1:Kvvh58BN8Y9/lBi7hTekvtMpm07eUZ0ck5pRHpsMWrY= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -199,44 +191,48 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99 h1:5vD4XjIc0X5+kHZjx4UecYdjA6mJo+XXNoaW0EjU5Os= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -244,39 +240,40 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ixkcwXThoiF6yf+R9scA= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -293,7 +290,6 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -303,66 +299,65 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a h1:7YaEqUc1tUg0yDwvdX+3U5bwrBg7u3FFAZ5D8gUs4/c= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74 h1:KW20qMcLRWuIgjdCpHFJbVZA7zsDKtFXPNcm7/eI5ZA= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nSRS/JrZEig0gz2BY4VNrg= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d h1:7M9AXzLrJWWGdDYtBblPHBTnHtaN6KKQ98OYb35mLlY= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d h1:3K34ovZAOnVaUPxanr0j4ghTZTPTA0CnXvjCl+5lZqk= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 h1:kDtqNkeBrZb8B+atrj50B5XLHpzXXqcCdZPP/ApQ5NY= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200618134242-20370b0cb4b2 h1:FD4wDsP+CQUqh2V12OBOt90pLHVToe58P++fUu3ggV4= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0 h1:GwFK8+l5/gdsOYKz5p6M4UK+QT8OvmHWZPJCnf+5DjA= google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0 
h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -370,67 +365,64 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto 
v0.0.0-20200122232147-0452cf42e150 h1:VPpdpQkGvFicX9yo4G5oxZPi9ALBnEOZblPSa/Wa2m4= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90 h1:7THRSvPuzF1bql5kyFzX0JM0vpGhwuhskgJrJsbZ80Y= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C94IGKADBTJZjZXAd+BubWi7r9EiI= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383 h1:Vo0fD5w0fUKriWlZLyrim2GXbumyN0D6euW79T9PgEE= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672 h1:jiDSspVssiikoRPFHT6pYrL+CL6/yIc3b9AuHO/4xik= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 h1:FGjyjrQGURdc98leD1P65IdQD9Zlr4McvRcqIlV6OSs= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705 h1:PYBmACG+YEv8uQPW0r1kJj8tR+gkF0UWq7iFdUezwEw= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 
h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -439,11 +431,8 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go index b9df7db..db9d138 100644 --- a/vendor/cloud.google.com/go/storage/post_policy_v4.go +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -249,10 +249,16 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options conds := make([]PostPolicyV4Condition, len(opts.Conditions)) copy(conds, opts.Conditions) conds = append(conds, - conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), - conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + // These are ordered lexicographically. Technically the order doesn't matter + // for creating the policy, but we use this order to match the + // cross-language conformance tests for this feature. 
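A quick aside before the conditions list resumes below: with ContentDisposition, ContentEncoding, and ContentType now wired into both the policy conditions and the returned form fields, a caller might exercise this roughly as in the following sketch. This is a hedged illustration, not part of the patch: the bucket, object, key file, and service-account email are hypothetical; the field names follow the descFields usage visible in this hunk.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	// Hypothetical service-account private key; any RSA signer would do.
	pemKey, err := ioutil.ReadFile("service-account.pem")
	if err != nil {
		log.Fatal(err)
	}
	policy, err := storage.GenerateSignedPostPolicyV4("my-bucket", "uploads/object.png", &storage.PostPolicyV4Options{
		GoogleAccessID: "uploader@example-project.iam.gserviceaccount.com", // hypothetical
		PrivateKey:     pemKey,
		Expires:        time.Now().Add(10 * time.Minute),
		Fields: &storage.PolicyV4Fields{
			// These fields now surface in both the signed policy conditions
			// and the HTML form fields returned to the caller.
			ContentType:  "image/png",
			CacheControl: "public, max-age=86400",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(policy.URL, policy.Fields)
}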
&singleValueCondition{"acl", descFields.ACL}, &singleValueCondition{"cache-control", descFields.CacheControl}, + &singleValueCondition{"content-disposition", descFields.ContentDisposition}, + &singleValueCondition{"content-encoding", descFields.ContentEncoding}, + &singleValueCondition{"content-type", descFields.ContentType}, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), ) YYYYMMDD := now.Format(yearMonthDay) @@ -261,8 +267,12 @@ func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options "x-goog-date": now.Format(iso8601), "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", "x-goog-algorithm": "GOOG4-RSA-SHA256", - "success_action_redirect": descFields.RedirectToURLOnSuccess, "acl": descFields.ACL, + "cache-control": descFields.CacheControl, + "content-disposition": descFields.ContentDisposition, + "content-encoding": descFields.ContentEncoding, + "content-type": descFields.ContentType, + "success_action_redirect": descFields.RedirectToURLOnSuccess, } for key, value := range descFields.Metadata { conds = append(conds, &singleValueCondition{key, value}) diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 20d9518..c46d7c2 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -43,6 +43,7 @@ import ( "cloud.google.com/go/internal/version" "google.golang.org/api/googleapi" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" raw "google.golang.org/api/storage/v1" htransport "google.golang.org/api/transport/http" ) @@ -97,45 +98,56 @@ type Client struct { } // NewClient creates a new Google Cloud Storage client. -// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +// The default scope is ScopeFullControl. To use a different scope, like +// ScopeReadOnly, use option.WithScopes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { var host, readHost, scheme string + // In general, it is recommended to use raw.NewService instead of htransport.NewClient + // since raw.NewService configures the correct default endpoints when initializing the + // internal http client. However, in our case, "NewRangeReader" in reader.go needs to + // access the http client directly to make requests, so we create the client manually + // here so it can be re-used by both reader.go and raw.NewService. This means we need to + // manually configure the default endpoint options on the http client. Furthermore, we + // need to account for STORAGE_EMULATOR_HOST override when setting the default endpoints. if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { scheme = "https" readHost = "storage.googleapis.com" // Prepend default options to avoid overriding options passed by the user. opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...) 
+ + opts = append(opts, internaloption.WithDefaultEndpoint("https://storage.googleapis.com/storage/v1/")) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint("https://storage.mtls.googleapis.com/storage/v1/")) } else { scheme = "http" readHost = host opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) + + opts = append(opts, internaloption.WithDefaultEndpoint(host)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(host)) } + // htransport selects the correct endpoint among WithEndpoint (user override), WithDefaultEndpoint, and WithDefaultMTLSEndpoint. hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } - rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc)) + // RawService should be created with the chosen endpoint to take account of user override. + rawService, err := raw.NewService(ctx, option.WithEndpoint(ep), option.WithHTTPClient(hc)) if err != nil { return nil, fmt.Errorf("storage client: %v", err) } - if ep == "" { - // Override the default value for BasePath from the raw client. - // TODO: remove when the raw client uses this endpoint as its default (~end of 2020) - rawService.BasePath = "https://storage.googleapis.com/storage/v1/" - } else { - // If the endpoint has been set explicitly, use this for the BasePath - // as well as readHost - rawService.BasePath = ep - u, err := url.Parse(ep) - if err != nil { - return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err) - } - readHost = u.Host + // Update readHost with the chosen endpoint. + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err) } + readHost = u.Host return &Client{ hc: hc, @@ -349,7 +361,7 @@ var ( ) // v2SanitizeHeaders applies the specifications for canonical extension headers at -// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// https://cloud.google.com/storage/docs/access-control/signed-urls-v2#about-canonical-extension-headers func v2SanitizeHeaders(hdrs []string) []string { headerMap := map[string][]string{} for _, hdr := range hdrs { @@ -397,7 +409,7 @@ func v2SanitizeHeaders(hdrs []string) []string { } // v4SanitizeHeaders applies the specifications for canonical extension headers -// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers. // // V4 does a couple things differently from V2: // - Headers get sorted by key, instead of by key:value. We do this in @@ -583,8 +595,10 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st for k, v := range opts.QueryParameters { canonicalQueryString[k] = append(canonicalQueryString[k], v...) } - - fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) + // url.Values.Encode escaping is correct, except that a space must be replaced + // by `%20` rather than `+`. + escapedQuery := strings.Replace(canonicalQueryString.Encode(), "+", "%20", -1) + fmt.Fprintf(buf, "%s\n", escapedQuery) // Fill in the hostname based on the desired URL style. u.Host = opts.Style.host(bucket) @@ -815,8 +829,8 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error return newObject(obj), nil } -// Update updates an object with the provided attributes. -// All zero-value attributes are ignored. +// Update updates an object with the provided attributes. 
See +// ObjectAttrsToUpdate docs for details on treatment of zero values. // ErrObjectNotExist will be returned if the object is not found. func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") @@ -868,6 +882,10 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) ( attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) forceSendFields = append(forceSendFields, "TemporaryHold") } + if !uattrs.CustomTime.IsZero() { + attrs.CustomTime = uattrs.CustomTime + forceSendFields = append(forceSendFields, "CustomTime") + } if uattrs.Metadata != nil { attrs.Metadata = uattrs.Metadata if len(attrs.Metadata) == 0 { @@ -923,7 +941,8 @@ func (o *ObjectHandle) ObjectName() string { // ObjectAttrsToUpdate is used to update the attributes of an object. // Only fields set to non-nil values will be updated. -// Set a field to its zero value to delete it. +// For all fields except CustomTime, set the field to its zero value to delete +// it. CustomTime cannot be deleted or changed to an earlier time once set. // // For example, to change ContentType and delete ContentEncoding and // Metadata, use @@ -940,7 +959,8 @@ type ObjectAttrsToUpdate struct { ContentEncoding optional.String ContentDisposition optional.String CacheControl optional.String - Metadata map[string]string // set to map[string]string{} to delete + CustomTime time.Time // Cannot be deleted or backdated from its current value. + Metadata map[string]string // Set to map[string]string{} to delete. ACL []ACLRule // If not empty, applies a predefined set of access controls. ACL must be nil. @@ -1047,6 +1067,10 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { if !o.RetentionExpirationTime.IsZero() { ret = o.RetentionExpirationTime.Format(time.RFC3339) } + var ct string + if !o.CustomTime.IsZero() { + ct = o.CustomTime.Format(time.RFC3339) + } return &raw.Object{ Bucket: bucket, Name: o.Name, @@ -1061,6 +1085,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { StorageClass: o.StorageClass, Acl: toRawObjectACL(o.ACL), Metadata: o.Metadata, + CustomTime: ct, } } @@ -1199,6 +1224,15 @@ type ObjectAttrs struct { // Etag is the HTTP/1.1 Entity tag for the object. // This field is read-only. Etag string + + // A user-specified timestamp which can be applied to an object. This is + // typically set in order to use the CustomTimeBefore and DaysSinceCustomTime + // LifecycleConditions to manage object lifecycles. + // + // CustomTime cannot be removed once set on an object. It can be updated to a + // later value but not to an earlier one. For more information see + // https://cloud.google.com/storage/docs/metadata#custom-time . + CustomTime time.Time } // convertTime converts a time in RFC3339 format to time.Time. @@ -1252,6 +1286,7 @@ func newObject(o *raw.Object) *ObjectAttrs { Deleted: convertTime(o.TimeDeleted), Updated: convertTime(o.Updated), Etag: o.Etag, + CustomTime: convertTime(o.CustomTime), } } @@ -1273,6 +1308,31 @@ func encodeUint32(u uint32) string { return base64.StdEncoding.EncodeToString(b) } +// Projection is enumerated type for Query.Projection. +type Projection int + +const ( + // ProjectionDefault returns all fields of objects. + ProjectionDefault Projection = iota + + // ProjectionFull returns all fields of objects. + ProjectionFull + + // ProjectionNoACL returns all fields of objects except for Owner and ACL. 
+ ProjectionNoACL +) + +func (p Projection) String() string { + switch p { + case ProjectionFull: + return "full" + case ProjectionNoACL: + return "noAcl" + default: + return "" + } +} + // Query represents a query to filter objects from a bucket. type Query struct { // Delimiter returns results in a directory-like fashion. @@ -1297,6 +1357,22 @@ type Query struct { // the query. It's used internally and is populated for the user by // calling Query.SetAttrSelection fieldSelection string + + // StartOffset is used to filter results to objects whose names are + // lexicographically equal to or after startOffset. If endOffset is also set, + // the objects listed will have names between startOffset (inclusive) and + // endOffset (exclusive). + StartOffset string + + // EndOffset is used to filter results to objects whose names are + // lexicographically before endOffset. If startOffset is also set, the objects + // listed will have names between startOffset (inclusive) and endOffset (exclusive). + EndOffset string + + // Projection defines the set of properties to return. It will default to ProjectionFull, + // which returns all properties. Passing ProjectionNoACL will omit Owner and ACL, + // which may improve performance when listing many objects. + Projection Projection } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -1329,6 +1405,7 @@ var attrToFieldMap = map[string]string{ "Deleted": "timeDeleted", "Updated": "updated", "Etag": "etag", + "CustomTime": "customTime", } // SetAttrSelection makes the query populate only specific attributes of @@ -1351,7 +1428,7 @@ func (q *Query) SetAttrSelection(attrs []string) error { if len(fieldSet) > 0 { var b bytes.Buffer - b.WriteString("items(") + b.WriteString("prefixes,items(") first := true for field := range fieldSet { if !first { diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go index 4aaf066..5d3d433 100644 --- a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go @@ -49,6 +49,9 @@ func (e *ErrorNode) Error(msg string) string { // Cause returns the error that preceded this error. func (e *ErrorNode) Cause() error { return e.cause } +// Unwrap provides compatibility for Go 1.13 error chains. +func (e *ErrorNode) Unwrap() error { return e.cause } + // Temporary returns true if the error occurred due to a temporary condition. func (e ErrorNode) Temporary() bool { type temporary interface { diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go deleted file mode 100644 index 9e18a79..0000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/atomicmorph.go +++ /dev/null @@ -1,69 +0,0 @@ -package azblob - -import "sync/atomic" - -// AtomicMorpherInt32 identifies a method passed to and invoked by the AtomicMorphInt32 function. -// The AtomicMorpher callback is passed a startValue and based on this value it returns -// what the new value should be and the result that AtomicMorph should return to its caller. -type atomicMorpherInt32 func(startVal int32) (val int32, morphResult interface{}) - -const targetAndMorpherMustNotBeNil = "target and morpher must not be nil" - -// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. 
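Stepping back from the raw hunks for a moment (the deleted atomicmorph.go listing continues below): the storage changes above add CustomTime, which can be set or moved forward but never backdated, and the StartOffset/EndOffset/Projection listing knobs. A minimal, hedged sketch of how these combine; the bucket and object names are hypothetical:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("my-bucket")

	// CustomTime may be set or advanced, never backdated.
	if _, err := bkt.Object("data/object.txt").Update(ctx, storage.ObjectAttrsToUpdate{
		CustomTime: time.Now(),
	}); err != nil {
		log.Fatal(err)
	}

	// List a lexicographic window of objects, omitting Owner/ACL for speed.
	it := bkt.Objects(ctx, &storage.Query{
		StartOffset: "data/",
		EndOffset:   "data0", // exclusive upper bound
		Projection:  storage.ProjectionNoACL,
	})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name, attrs.CustomTime)
	}
}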
-func atomicMorphInt32(target *int32, morpher atomicMorpherInt32) interface{} { - for { - currentVal := atomic.LoadInt32(target) - desiredVal, morphResult := morpher(currentVal) - if atomic.CompareAndSwapInt32(target, currentVal, desiredVal) { - return morphResult - } - } -} - -// AtomicMorpherUint32 identifies a method passed to and invoked by the AtomicMorph function. -// The AtomicMorpher callback is passed a startValue and based on this value it returns -// what the new value should be and the result that AtomicMorph should return to its caller. -type atomicMorpherUint32 func(startVal uint32) (val uint32, morphResult interface{}) - -// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. -func atomicMorphUint32(target *uint32, morpher atomicMorpherUint32) interface{} { - for { - currentVal := atomic.LoadUint32(target) - desiredVal, morphResult := morpher(currentVal) - if atomic.CompareAndSwapUint32(target, currentVal, desiredVal) { - return morphResult - } - } -} - -// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function. -// The AtomicMorpher callback is passed a startValue and based on this value it returns -// what the new value should be and the result that AtomicMorph should return to its caller. -type atomicMorpherInt64 func(startVal int64) (val int64, morphResult interface{}) - -// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. -func atomicMorphInt64(target *int64, morpher atomicMorpherInt64) interface{} { - for { - currentVal := atomic.LoadInt64(target) - desiredVal, morphResult := morpher(currentVal) - if atomic.CompareAndSwapInt64(target, currentVal, desiredVal) { - return morphResult - } - } -} - -// AtomicMorpherUint64 identifies a method passed to and invoked by the AtomicMorphUint64 function. -// The AtomicMorpher callback is passed a startValue and based on this value it returns -// what the new value should be and the result that AtomicMorph should return to its caller. -type atomicMorpherUint64 func(startVal uint64) (val uint64, morphResult interface{}) - -// AtomicMorph atomically morphs target in to new value (and result) as indicated bythe AtomicMorpher callback function. 
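Likewise for the small pipeline/error.go hunk further up (the atomicmorph.go deletion continues below): the new Unwrap method lets the standard errors package walk Azure pipeline error chains. A hedged sketch under that assumption; the classify helper is hypothetical:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// classify is a hypothetical helper: with Unwrap in place, errors.Is and
// errors.As can now see through pipeline.ErrorNode wrappers.
func classify(err error) {
	if errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("timed out somewhere in the pipeline")
		return
	}
	var stgErr azblob.StorageError // interface implemented by service errors
	if errors.As(err, &stgErr) {
		fmt.Println("service code:", stgErr.ServiceCode())
	}
}

func main() {
	classify(context.DeadlineExceeded) // trivial demonstration
}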
-func atomicMorphUint64(target *uint64, morpher atomicMorpherUint64) interface{} { - for { - currentVal := atomic.LoadUint64(target) - desiredVal, morphResult := morpher(currentVal) - if atomic.CompareAndSwapUint64(target, currentVal, desiredVal) { - return morphResult - } - } -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go new file mode 100644 index 0000000..8d82ebe --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go @@ -0,0 +1,24 @@ +package azblob + +import ( + "errors" +) + +type bytesWriter []byte + +func newBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("Offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("Not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go new file mode 100644 index 0000000..b7dc0d7 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go @@ -0,0 +1,219 @@ +package azblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + + guuid "github.com/google/uuid" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte, ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) + CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions, AccessTierType, BlobTagsMap, ClientProvidedKeyOptions) (*BlockBlobCommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably +// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The +// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload +// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works +// well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. This gives a single dial to tweak and we can +// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). +// We can even provide a utility to dial this number in for customer networks to optimize their copies. +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) { + if err := o.defaults(); err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + cp := &copier{ + ctx: ctx, + cancel: cancel, + reader: from, + to: to, + id: newID(), + o: o, + errCh: make(chan error, 1), + } + + // Send all our chunks until we get an error. 
+ var err error + for { + if err = cp.sendChunk(); err != nil { + break + } + } + // If the error is not EOF, then we have a problem. + if err != nil && !errors.Is(err, io.EOF) { + return nil, err + } + + // Close out our upload. + if err := cp.close(); err != nil { + return nil, err + } + + return cp.result, nil +} + +// copier streams a file via chunks in parallel from a reader representing a file. +// Do not use directly, instead use copyFromReader(). +type copier struct { + // ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case, + // the copier has the lifetime of a function call, so it's fine. + ctx context.Context + cancel context.CancelFunc + + // o contains our options for uploading. + o UploadStreamToBlockBlobOptions + + // id provides the ids for each chunk. + id *id + + // reader is the source to be written to storage. + reader io.Reader + // to is the location we are writing our chunks to. + to blockWriter + + // errCh is used to hold the first error from our concurrent writers. + errCh chan error + // wg provides a count of how many writers we are waiting to finish. + wg sync.WaitGroup + + // result holds the final result from blob storage after we have submitted all chunks. + result *BlockBlobCommitBlockListResponse +} + +type copierChunk struct { + buffer []byte + id string +} + +// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error +// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier. +func (c *copier) getErr() error { + select { + case err := <-c.errCh: + return err + default: + } + return c.ctx.Err() +} + +// sendChunk reads data from our internal reader, creates a chunk, and sends it to be written via a channel. +// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF. +func (c *copier) sendChunk() error { + if err := c.getErr(); err != nil { + return err + } + + buffer := c.o.TransferManager.Get() + if len(buffer) == 0 { + return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager") + } + + n, err := io.ReadFull(c.reader, buffer) + switch { + case err == nil && n == 0: + return nil + case err == nil: + id := c.id.next() + c.wg.Add(1) + c.o.TransferManager.Run( + func() { + defer c.wg.Done() + c.write(copierChunk{buffer: buffer[0:n], id: id}) + }, + ) + return nil + case err != nil && (err == io.EOF || err == io.ErrUnexpectedEOF) && n == 0: + return io.EOF + } + + if err == io.EOF || err == io.ErrUnexpectedEOF { + id := c.id.next() + c.wg.Add(1) + c.o.TransferManager.Run( + func() { + defer c.wg.Done() + c.write(copierChunk{buffer: buffer[0:n], id: id}) + }, + ) + return io.EOF + } + if err := c.getErr(); err != nil { + return err + } + return err +} + +// write uploads a chunk to blob storage. +func (c *copier) write(chunk copierChunk) { + defer c.o.TransferManager.Put(chunk.buffer) + + if err := c.ctx.Err(); err != nil { + return + } + _, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer), c.o.AccessConditions.LeaseAccessConditions, nil, c.o.ClientProvidedKeyOptions) + if err != nil { + c.errCh <- fmt.Errorf("write error: %w", err) + return + } + return +} + +// close commits our blocks to blob storage and closes our writer.
+func (c *copier) close() error { + c.wg.Wait() + + if err := c.getErr(); err != nil { + return err + } + + var err error + c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions, c.o.BlobAccessTier, c.o.BlobTagsMap, c.o.ClientProvidedKeyOptions) + return err +} + +// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. +type id struct { + u [64]byte + num uint32 + all []string +} + +// newID constructs a new id. +func newID() *id { + uu := guuid.New() + u := [64]byte{} + copy(u[:], uu[:]) + return &id{u: u} +} + +// next returns the next ID. +func (id *id) next() string { + defer atomic.AddUint32(&id.num, 1) + + binary.BigEndian.PutUint32((id.u[len(guuid.UUID{}):]), atomic.LoadUint32(&id.num)) + str := base64.StdEncoding.EncodeToString(id.u[:]) + id.all = append(id.all, str) + + return str +} + +// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return. +// The value is only valid until the next time next() is called. +func (id *id) issued() []string { + return id.all +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go new file mode 100644 index 0000000..18c3c26 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go @@ -0,0 +1 @@ +package azblob diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go index af09443..7d5a13b 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go @@ -3,6 +3,7 @@ package azblob import ( "context" "encoding/base64" + "fmt" "io" "net/http" @@ -55,24 +56,32 @@ type UploadToBlockBlobOptions struct { // AccessConditions indicates the access conditions for the block blob. AccessConditions BlobAccessConditions + // BlobAccessTier indicates the tier of blob + BlobAccessTier AccessTierType + + // BlobTagsMap + BlobTagsMap BlobTagsMap + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + ClientProvidedKeyOptions ClientProvidedKeyOptions + // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) Parallelism uint16 } -// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob. -func UploadBufferToBlockBlob(ctx context.Context, b []byte, +// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob. 
+func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { - bufferSize := int64(len(b)) if o.BlockSize == 0 { // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error - if bufferSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { + if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { return nil, errors.New("buffer is too large to upload to a block blob") } // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request - if bufferSize <= BlockBlobMaxUploadBlobBytes { + if readerSize <= BlockBlobMaxUploadBlobBytes { o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified } else { - o.BlockSize = bufferSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB o.BlockSize = BlobDefaultDownloadBlockSize } @@ -80,31 +89,31 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte, } } - if bufferSize <= BlockBlobMaxUploadBlobBytes { + if readerSize <= BlockBlobMaxUploadBlobBytes { // If the size can fit in 1 Upload call, do it this way - var body io.ReadSeeker = bytes.NewReader(b) + var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) if o.Progress != nil { body = pipeline.NewRequestBodyProgress(body, o.Progress) } - return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) + return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions) } - var numBlocks = uint16(((bufferSize - 1) / o.BlockSize) + 1) + var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs progress := int64(0) progressLock := &sync.Mutex{} err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "UploadBufferToBlockBlob", - TransferSize: bufferSize, + OperationName: "uploadReaderAtToBlockBlob", + TransferSize: readerSize, ChunkSize: o.BlockSize, Parallelism: o.Parallelism, Operation: func(offset int64, count int64, ctx context.Context) error { // This function is called once per block. // It is passed this block's offset within the buffer and its count of bytes // Prepare to read the proper block/section of the buffer - var body io.ReadSeeker = bytes.NewReader(b[offset : offset+count]) + var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) blockNum := offset / o.BlockSize if o.Progress != nil { blockProgress := int64(0) @@ -122,7 +131,7 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte, // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks // at the same time causing PutBlockList to get a mix of blocks from all the clients. 
blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes()) - _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil) + _, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions) return err }, }) @@ -130,7 +139,13 @@ func UploadBufferToBlockBlob(ctx context.Context, b []byte, return nil, err } // All put blocks were successful, call Put Block List to finalize the blob - return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions) + return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions) +} + +// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob. +func UploadBufferToBlockBlob(ctx context.Context, b []byte, + blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) { + return uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), blockBlobURL, o) } // UploadFileToBlockBlob uploads a file in blocks to a block blob. @@ -141,15 +156,7 @@ func UploadFileToBlockBlob(ctx context.Context, file *os.File, if err != nil { return nil, err } - m := mmf{} // Default to an empty slice; used for 0-size file - if stat.Size() != 0 { - m, err = newMMF(file, false, 0, int(stat.Size())) - if err != nil { - return nil, err - } - defer m.unmap() - } - return UploadBufferToBlockBlob(ctx, m, blockBlobURL, o) + return uploadReaderAtToBlockBlob(ctx, file, stat.Size(), blockBlobURL, o) } /////////////////////////////////////////////////////////////////////////////// @@ -167,6 +174,9 @@ type DownloadFromBlobOptions struct { // AccessConditions indicates the access conditions used when making HTTP GET requests against the blob. AccessConditions BlobAccessConditions + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + ClientProvidedKeyOptions ClientProvidedKeyOptions + // Parallelism indicates the maximum number of blocks to download in parallel (0=default) Parallelism uint16 @@ -174,9 +184,9 @@ type DownloadFromBlobOptions struct { RetryReaderOptionsPerBlock RetryReaderOptions } -// downloadBlobToBuffer downloads an Azure blob to a buffer with parallel. -func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, - b []byte, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error { +// downloadBlobToWriterAt downloads an Azure blob to a writer in parallel. +func downloadBlobToWriterAt(ctx context.Context, blobURL BlobURL, offset int64, count int64, + writer io.WriterAt, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error { if o.BlockSize == 0 { o.BlockSize = BlobDefaultDownloadBlockSize } @@ -186,7 +196,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it } else { // If we don't have the length at all, get it - dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false) + dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false, o.ClientProvidedKeyOptions) if err != nil { return err } @@ -194,17 +204,22 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co } } + if count <= 0 { + // The file is empty, there is nothing to download.
+ return nil + } + // Prepare and do parallel download. progress := int64(0) progressLock := &sync.Mutex{} err := DoBatchTransfer(ctx, BatchTransferOptions{ - OperationName: "downloadBlobToBuffer", + OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, Parallelism: o.Parallelism, Operation: func(chunkStart int64, count int64, ctx context.Context) error { - dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false) + dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false, o.ClientProvidedKeyOptions) if err != nil { return err } @@ -222,7 +237,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co progressLock.Unlock() }) } - _, err = io.ReadFull(body, b[chunkStart:chunkStart+count]) + _, err = io.Copy(newSectionWriter(writer, chunkStart, count), body) body.Close() return err }, @@ -237,7 +252,7 @@ func downloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, co // Offset and count are optional, pass 0 for both to download the entire blob. func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64, b []byte, o DownloadFromBlobOptions) error { - return downloadBlobToBuffer(ctx, blobURL, offset, count, b, o, nil) + return downloadBlobToWriterAt(ctx, blobURL, offset, count, newBytesWriter(b), o, nil) } // DownloadBlobToFile downloads an Azure blob to a local file. @@ -250,7 +265,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun if count == CountToEnd { // Try to get Azure blob's size - props, err := blobURL.GetProperties(ctx, o.AccessConditions) + props, err := blobURL.GetProperties(ctx, o.AccessConditions, o.ClientProvidedKeyOptions) if err != nil { return err } @@ -271,13 +286,7 @@ func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, coun } if size > 0 { - // 3. Set mmap and call downloadBlobToBuffer. - m, err := newMMF(file, true, 0, int(size)) - if err != nil { - return err - } - defer m.unmap() - return downloadBlobToBuffer(ctx, blobURL, offset, size, m, o, nil) + return downloadBlobToWriterAt(ctx, blobURL, offset, size, file, o, nil) } else { // if the blob's size is 0, there is no need in downloading it return nil } @@ -301,6 +310,10 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { return errors.New("ChunkSize cannot be 0") } + if o.Parallelism == 0 { + o.Parallelism = 5 // default Parallelism + } + // Prepare and do parallel operations. numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently @@ -309,9 +322,6 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { defer cancel() // Create the goroutines that process each operation (in parallel). - if o.Parallelism == 0 { - o.Parallelism = 5 // default Parallelism - } for g := uint16(0); g < o.Parallelism; g++ { //grIndex := g go func() { @@ -352,192 +362,205 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { //////////////////////////////////////////////////////////////////////////////////////////////// -type UploadStreamToBlockBlobOptions struct { - BufferSize int - MaxBuffers int - BlobHTTPHeaders BlobHTTPHeaders - Metadata Metadata - AccessConditions BlobAccessConditions +// TransferManager provides a buffer and thread pool manager for certain transfer options. 
+// It is undefined behavior if code outside of this package calls any of these methods. +type TransferManager interface { + // Get provides a buffer that will be used to read data into and write out to the stream. + // It is guaranteed by this package to not read or write beyond the size of the slice. + Get() []byte + // Put may or may not put the buffer into underlying storage, depending on settings. + // The buffer must not be touched after this has been called. + Put(b []byte) + // Run will use a goroutine pool entry to run a function. This blocks until a pool + // goroutine becomes available. + Run(func()) + // Close shuts down all internal goroutines. This must be called when the TransferManager + // will no longer be used. Not closing it will cause a goroutine leak. + Close() } -func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, - o UploadStreamToBlockBlobOptions) (CommonResponse, error) { - result, err := uploadStream(ctx, reader, - UploadStreamOptions{BufferSize: o.BufferSize, MaxBuffers: o.MaxBuffers}, - &uploadStreamToBlockBlobOptions{b: blockBlobURL, o: o, blockIDPrefix: newUUID()}) - if err != nil { - return nil, err - } - return result.(CommonResponse), nil +type staticBuffer struct { + buffers chan []byte + size int + threadpool chan func() } -type uploadStreamToBlockBlobOptions struct { - b BlockBlobURL - o UploadStreamToBlockBlobOptions - blockIDPrefix uuid // UUID used with all blockIDs - maxBlockNum uint32 // defaults to 0 - firstBlock []byte // Used only if maxBlockNum is 0 -} - -func (t *uploadStreamToBlockBlobOptions) start(ctx context.Context) (interface{}, error) { - return nil, nil -} - -func (t *uploadStreamToBlockBlobOptions) chunk(ctx context.Context, num uint32, buffer []byte) error { - if num == 0 { - t.firstBlock = buffer - - // If whole payload fits in 1 block, don't stage it; End will upload it with 1 I/O operation - // If the payload is exactly the same size as the buffer, there may be more content coming in. - if len(buffer) < t.o.BufferSize { - return nil - } - } - // Else, upload a staged block...
- atomicMorphUint32(&t.maxBlockNum, func(startVal uint32) (val uint32, morphResult interface{}) { - // Atomically remember (in t.numBlocks) the maximum block num we've ever seen - if startVal < num { - return num, nil - } - return startVal, nil - }) - blockID := newUuidBlockID(t.blockIDPrefix).WithBlockNumber(num).ToBase64() - _, err := t.b.StageBlock(ctx, blockID, bytes.NewReader(buffer), LeaseAccessConditions{}, nil) - return err -} - -func (t *uploadStreamToBlockBlobOptions) end(ctx context.Context) (interface{}, error) { - // If the first block had the exact same size as the buffer - // we would have staged it as a block thinking that there might be more data coming - if t.maxBlockNum == 0 && len(t.firstBlock) != t.o.BufferSize { - // If whole payload fits in 1 block (block #0), upload it with 1 I/O operation - return t.b.Upload(ctx, bytes.NewReader(t.firstBlock), - t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions) - } - // Multiple blocks staged, commit them all now - blockID := newUuidBlockID(t.blockIDPrefix) - blockIDs := make([]string, t.maxBlockNum+1) - for bn := uint32(0); bn <= t.maxBlockNum; bn++ { - blockIDs[bn] = blockID.WithBlockNumber(bn).ToBase64() - } - return t.b.CommitBlockList(ctx, blockIDs, t.o.BlobHTTPHeaders, t.o.Metadata, t.o.AccessConditions) -} - -//////////////////////////////////////////////////////////////////////////////////////////////////// - -type iTransfer interface { - start(ctx context.Context) (interface{}, error) - chunk(ctx context.Context, num uint32, buffer []byte) error - end(ctx context.Context) (interface{}, error) -} - -type UploadStreamOptions struct { - MaxBuffers int - BufferSize int -} - -type firstErr struct { - lock sync.Mutex - finalError error -} - -func (fe *firstErr) set(err error) { - fe.lock.Lock() - if fe.finalError == nil { - fe.finalError = err - } - fe.lock.Unlock() -} - -func (fe *firstErr) get() (err error) { - fe.lock.Lock() - err = fe.finalError - fe.lock.Unlock() - return -} - -func uploadStream(ctx context.Context, reader io.Reader, o UploadStreamOptions, t iTransfer) (interface{}, error) { - firstErr := firstErr{} - ctx, cancel := context.WithCancel(ctx) // New context so that any failure cancels everything - defer cancel() - wg := sync.WaitGroup{} // Used to know when all outgoing messages have finished processing - type OutgoingMsg struct { - chunkNum uint32 - buffer []byte +// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer +// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This +// can be shared between calls if you wish to control maximum memory and concurrency with +// multiple concurrent calls. 
+func NewStaticBuffer(size, max int) (TransferManager, error) { + if size < 1 || max < 1 { + return nil, fmt.Errorf("cannot be called with size or max set to < 1") } - // Create a channel to hold the buffers usable for incoming datsa - incoming := make(chan []byte, o.MaxBuffers) - outgoing := make(chan OutgoingMsg, o.MaxBuffers) // Channel holding outgoing buffers - if result, err := t.start(ctx); err != nil { - return result, err + if size < _1MiB { + return nil, fmt.Errorf("cannot have size < 1MiB") } - numBuffers := 0 // The number of buffers & out going goroutines created so far - injectBuffer := func() { - // For each Buffer, create it and a goroutine to upload it - incoming <- make([]byte, o.BufferSize) // Add the new buffer to the incoming channel so this goroutine can from the reader into it - numBuffers++ + threadpool := make(chan func(), max) + buffers := make(chan []byte, max) + for i := 0; i < max; i++ { go func() { - for outgoingMsg := range outgoing { - // Upload the outgoing buffer - err := t.chunk(ctx, outgoingMsg.chunkNum, outgoingMsg.buffer) - wg.Done() // Indicate this buffer was sent - if nil != err { - // NOTE: finalErr could be assigned to multiple times here which is OK, - // some error will be returned. - firstErr.set(err) - cancel() - } - incoming <- outgoingMsg.buffer // The goroutine reading from the stream can reuse this buffer now + for f := range threadpool { + f() + } + }() + + buffers <- make([]byte, size) + } + return staticBuffer{ + buffers: buffers, + size: size, + threadpool: threadpool, + }, nil +} + +// Get implements TransferManager.Get(). +func (s staticBuffer) Get() []byte { + return <-s.buffers +} + +// Put implements TransferManager.Put(). +func (s staticBuffer) Put(b []byte) { + select { + case s.buffers <- b: + default: // This shouldn't happen, but just in case they call Put() with their own buffer. + } +} + +// Run implements TransferManager.Run(). +func (s staticBuffer) Run(f func()) { + s.threadpool <- f +} + +// Close implements TransferManager.Close(). +func (s staticBuffer) Close() { + close(s.threadpool) + close(s.buffers) +} + +type syncPool struct { + threadpool chan func() + pool sync.Pool +} + +// NewSyncPool creates a TransferManager that will use a sync.Pool +// that can hold a non-capped number of buffers constrained by concurrency. This +// can be shared between calls if you wish to share memory and concurrency.
+func NewSyncPool(size, concurrency int) (TransferManager, error) { + if size < 1 || concurrency < 1 { + return nil, fmt.Errorf("cannot be called with size or concurrency set to < 1") + } + + if size < _1MiB { + return nil, fmt.Errorf("cannot have size < 1MiB") + } + + threadpool := make(chan func(), concurrency) + for i := 0; i < concurrency; i++ { + go func() { + for f := range threadpool { + f() } }() } - injectBuffer() // Create our 1st buffer & outgoing goroutine - // This goroutine grabs a buffer, reads from the stream into the buffer, - // and inserts the buffer into the outgoing channel to be uploaded - for c := uint32(0); true; c++ { // Iterate once per chunk - var buffer []byte - if numBuffers < o.MaxBuffers { - select { - // We're not at max buffers, see if a previously-created buffer is available - case buffer = <-incoming: - break - default: - // No buffer available; inject a new buffer & go routine to process it - injectBuffer() - buffer = <-incoming // Grab the just-injected buffer - } - } else { - // We are at max buffers, block until we get to reuse one - buffer = <-incoming - } - n, err := io.ReadFull(reader, buffer) - if err != nil { // Less than len(buffer) bytes were read - buffer = buffer[:n] // Make slice match the # of read bytes - } - if len(buffer) > 0 { - // Buffer not empty, upload it - wg.Add(1) // We're posting a buffer to be sent - outgoing <- OutgoingMsg{chunkNum: c, buffer: buffer} - } - if err != nil { // The reader is done, no more outgoing buffers - if err == io.EOF || err == io.ErrUnexpectedEOF { - err = nil // This function does NOT return an error if io.ReadFull returns io.EOF or io.ErrUnexpectedEOF - } else { - firstErr.set(err) - } - break - } - } - // NOTE: Don't close the incoming channel because the outgoing goroutines post buffers into it when they are done - close(outgoing) // Make all the outgoing goroutines terminate when this channel is empty - wg.Wait() // Wait for all pending outgoing messages to complete - err := firstErr.get() - if err == nil { - // If no error, after all blocks uploaded, commit them to the blob & return the result - return t.end(ctx) - } - return nil, err + return &syncPool{ + threadpool: threadpool, + pool: sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, + }, + }, nil +} + +// Get implements TransferManager.Get(). +func (s *syncPool) Get() []byte { + return s.pool.Get().([]byte) +} + +// Put implements TransferManager.Put(). +func (s *syncPool) Put(b []byte) { + s.pool.Put(b) +} + +// Run implements TransferManager.Run(). +func (s *syncPool) Run(f func()) { + s.threadpool <- f +} + +// Close implements TransferManager.Close(). +func (s *syncPool) Close() { + close(s.threadpool) +} + +const _1MiB = 1024 * 1024 + +// UploadStreamToBlockBlobOptions is options for UploadStreamToBlockBlob. +type UploadStreamToBlockBlobOptions struct { + // TransferManager provides a TransferManager that controls buffer allocation/reuse and + // concurrency. This overrides BufferSize and MaxBuffers if set. + TransferManager TransferManager + transferMangerNotSet bool + // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB. + BufferSize int + // MaxBuffers defines the number of simultaneous uploads that will be performed to upload the file.
+ MaxBuffers int + BlobHTTPHeaders BlobHTTPHeaders + Metadata Metadata + AccessConditions BlobAccessConditions + BlobAccessTier AccessTierType + BlobTagsMap BlobTagsMap + ClientProvidedKeyOptions ClientProvidedKeyOptions +} + +func (u *UploadStreamToBlockBlobOptions) defaults() error { + if u.TransferManager != nil { + return nil + } + + if u.MaxBuffers == 0 { + u.MaxBuffers = 1 + } + + if u.BufferSize < _1MiB { + u.BufferSize = _1MiB + } + + var err error + u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers) + if err != nil { + return fmt.Errorf("bug: default transfer manager could not be created: %s", err) + } + u.transferMangerNotSet = true + return nil +} + +// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL. +// A Context deadline or cancellation will cause this to error. +func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, o UploadStreamToBlockBlobOptions) (CommonResponse, error) { + if err := o.defaults(); err != nil { + return nil, err + } + + // If we used the default manager, we need to close it. + if o.transferMangerNotSet { + defer o.TransferManager.Close() + } + + result, err := copyFromReader(ctx, reader, blockBlobURL, o) + if err != nil { + return nil, err + } + + return result, nil +} + +// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version. +// TODO: Remove on next minor release in v0 or before v1. +type UploadStreamOptions struct { + BufferSize int + MaxBuffers int } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go index 067939b..93c71eb 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go @@ -8,6 +8,7 @@ import ( const ( snapshot = "snapshot" + versionId = "versionid" SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00" ) @@ -23,6 +24,7 @@ type BlobURLParts struct { Snapshot string // "" if not a snapshot SAS SASQueryParameters UnparsedParams string + VersionID string // "" if not versioning enabled } // IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. 
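Usage sketch (illustrative, not part of the vendored patch): with the TransferManager plumbing above, a caller can either let UploadStreamToBlockBlob build its default static-buffer manager from BufferSize/MaxBuffers, or construct one explicitly and share it across uploads. The account, container, and sizes below are hypothetical.

	package main

	import (
		"context"
		"log"
		"net/url"
		"strings"

		"github.com/Azure/azure-storage-blob-go/azblob"
	)

	func main() {
		// Hypothetical blob URL; real code would attach a credentialed pipeline.
		u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/myblob")
		p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
		bbURL := azblob.NewBlockBlobURL(*u, p)

		// A sync.Pool-backed manager (4 MiB buffers, 8 workers) can be shared between calls.
		tm, err := azblob.NewSyncPool(4*1024*1024, 8)
		if err != nil {
			log.Fatal(err)
		}
		defer tm.Close()

		if _, err := azblob.UploadStreamToBlockBlob(context.Background(), strings.NewReader("hello"),
			bbURL, azblob.UploadStreamToBlockBlobOptions{TransferManager: tm}); err != nil {
			log.Fatal(err)
		}
	}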
@@ -85,12 +87,20 @@ func NewBlobURLParts(u url.URL) BlobURLParts {
 	// Convert the query parameters to a case-sensitive map & trim whitespace
 	paramsMap := u.Query()
 
-	up.Snapshot = "" // Assume no snapshot
+	up.Snapshot = ""  // Assume no snapshot
+	up.VersionID = "" // Assume no versionID
 	if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok {
 		up.Snapshot = snapshotStr[0]
 		// If we recognized the query parameter, remove it from the map
 		delete(paramsMap, snapshot)
 	}
+
+	if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok {
+		up.VersionID = versionIDs[0]
+		// If we recognized the query parameter, remove it from the map
+		delete(paramsMap, versionId)   // delete "versionid" from paramsMap
+		delete(paramsMap, "versionId") // delete "versionId" from paramsMap
+	}
 	up.SAS = newSASQueryParameters(paramsMap, true)
 	up.UnparsedParams = paramsMap.Encode()
 	return up
@@ -136,6 +146,15 @@ func (up BlobURLParts) URL() url.URL {
 		}
 		rawQuery += snapshot + "=" + up.Snapshot
 	}
+
+	// Concatenate blob version id query parameter (if it exists)
+	if up.VersionID != "" {
+		if len(rawQuery) > 0 {
+			rawQuery += "&"
+		}
+		rawQuery += versionId + "=" + up.VersionID
+	}
+
 	sas := up.SAS.Encode()
 	if sas != "" {
 		if len(rawQuery) > 0 {
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
new file mode 100644
index 0000000..71ca0ec
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
@@ -0,0 +1,33 @@
+package azblob
+
+// ClientProvidedKeyOptions contains headers which may be specified from service version 2019-02-02
+// or higher to encrypt the data on the service-side with the given key. Use of customer-provided keys
+// must be done over HTTPS. As the encryption key itself is provided in the request, a secure connection
+// must be established to transfer the key.
+// Note: Azure Storage does not store or manage customer provided encryption keys. Keys are securely discarded
+// as soon as possible after they’ve been used to encrypt or decrypt the blob data.
+// https://docs.microsoft.com/en-us/azure/storage/common/storage-service-encryption
+// https://docs.microsoft.com/en-us/azure/storage/common/customer-managed-keys-overview
+type ClientProvidedKeyOptions struct {
+	// A Base64-encoded AES-256 encryption key value.
+	EncryptionKey *string
+
+	// The Base64-encoded SHA256 of the encryption key.
+	EncryptionKeySha256 *string
+
+	// Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
+	EncryptionAlgorithm EncryptionAlgorithmType
+
+	// Specifies the name of the encryption scope to use to encrypt the data provided in the request
+	// https://docs.microsoft.com/en-us/azure/storage/blobs/encryption-scope-overview
+	// https://docs.microsoft.com/en-us/azure/key-vault/general/overview
+	EncryptionScope *string
+}
+
+// NewClientProvidedKeyOptions builds a ClientProvidedKeyOptions from the given key, key hash, and encryption scope.
+// By default the value of the encryption algorithm param is "AES256" for service version 2019-02-02 or higher.
+func NewClientProvidedKeyOptions(ek *string, eksha256 *string, es *string) (cpk ClientProvidedKeyOptions) {
+	cpk = ClientProvidedKeyOptions{}
+	cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, cpk.EncryptionScope = ek, eksha256, EncryptionAlgorithmAES256, es
+	return cpk
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
index 4d45d3e..11b1830 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
@@ -44,6 +44,14 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountC
 			return SASQueryParameters{}, err
 		}
 		v.Permissions = perms.String()
+	} else if v.Version != "" {
+		resource = "bv"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
 	} else if v.BlobName == "" {
 		// Make sure the permission characters are in the correct order
 		perms := &ContainerSASPermissions{}
@@ -155,7 +163,7 @@ func getCanonicalName(account string, containerName string, blobName string) str
 // The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
 // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
 type ContainerSASPermissions struct {
-	Read, Add, Create, Write, Delete, List bool
+	Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool
 }
 
 // String produces the SAS permissions string for an Azure Storage container.
@@ -177,9 +185,15 @@ func (p ContainerSASPermissions) String() string {
 	if p.Delete {
 		b.WriteRune('d')
 	}
+	if p.DeletePreviousVersion {
+		b.WriteRune('x')
+	}
 	if p.List {
 		b.WriteRune('l')
 	}
+	if p.Tag {
+		b.WriteRune('t')
+	}
 	return b.String()
 }
 
@@ -198,10 +212,14 @@ func (p *ContainerSASPermissions) Parse(s string) error {
 			p.Write = true
 		case 'd':
 			p.Delete = true
+		case 'x':
+			p.DeletePreviousVersion = true
 		case 'l':
 			p.List = true
+		case 't':
+			p.Tag = true
 		default:
-			return fmt.Errorf("Invalid permission: '%v'", r)
+			return fmt.Errorf("invalid permission: '%v'", r)
 		}
 	}
 	return nil
@@ -209,7 +227,7 @@ func (p *ContainerSASPermissions) Parse(s string) error {
 
 // The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
 // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
-type BlobSASPermissions struct{ Read, Add, Create, Write, Delete bool }
+type BlobSASPermissions struct{ Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag bool }
 
 // String produces the SAS permissions string for an Azure Storage blob.
 // Call this method to set BlobSASSignatureValues's Permissions field.
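Illustrative sketch (not from the patch): the new DeletePreviousVersion ('x') and Tag ('t') permissions round-trip through String and Parse like the existing flags; String always emits the characters in the order the service expects. Assumes the azblob package and log are imported.

	perms := azblob.BlobSASPermissions{Read: true, Write: true, DeletePreviousVersion: true, Tag: true}
	s := perms.String() // "rwxt"

	var parsed azblob.BlobSASPermissions
	if err := parsed.Parse(s); err != nil { // recovers the same flag set
		log.Fatal(err)
	}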
@@ -230,6 +248,12 @@ func (p BlobSASPermissions) String() string {
 	if p.Delete {
 		b.WriteRune('d')
 	}
+	if p.DeletePreviousVersion {
+		b.WriteRune('x')
+	}
+	if p.Tag {
+		b.WriteRune('t')
+	}
 	return b.String()
 }
 
@@ -248,8 +272,12 @@ func (p *BlobSASPermissions) Parse(s string) error {
 			p.Write = true
 		case 'd':
 			p.Delete = true
+		case 'x':
+			p.DeletePreviousVersion = true
+		case 't':
+			p.Tag = true
 		default:
-			return fmt.Errorf("Invalid permission: '%v'", r)
+			return fmt.Errorf("invalid permission: '%v'", r)
 		}
 	}
 	return nil
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
new file mode 100644
index 0000000..6d86f6e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
@@ -0,0 +1,47 @@
+package azblob
+
+import (
+	"errors"
+	"io"
+)
+
+type sectionWriter struct {
+	count    int64
+	offset   int64
+	position int64
+	writerAt io.WriterAt
+}
+
+func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
+	return &sectionWriter{
+		count:    count,
+		offset:   off,
+		writerAt: c,
+	}
+}
+
+func (c *sectionWriter) Write(p []byte) (int, error) {
+	remaining := c.count - c.position
+
+	if remaining <= 0 {
+		return 0, errors.New("end of section reached")
+	}
+
+	slice := p
+
+	if int64(len(slice)) > remaining {
+		slice = slice[:remaining]
+	}
+
+	n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
+	c.position += int64(n)
+	if err != nil {
+		return n, err
+	}
+
+	if len(p) > n {
+		return n, errors.New("not enough space for all bytes")
+	}
+
+	return n, nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
index d260f8a..292710c 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
@@ -61,8 +61,11 @@ const (
 	// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
 	ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"
 
-	// ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
-	ServiceCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
+	// ServiceCodeFeatureEncryptionMismatch means the given customer specified encryption does not match the encryption used to encrypt the blob.
+	ServiceCodeFeatureEncryptionMismatch ServiceCodeType = "BlobCustomerSpecifiedEncryptionMismatch"
+
+	// ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
+	ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
 
 	// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
 	ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
index 057c133..363353a 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
@@ -42,25 +42,40 @@ func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
 	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
 }
 
+// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version id returning a URL to the base blob.
+func (ab AppendBlobURL) WithVersionID(versionId string) AppendBlobURL {
+	p := NewBlobURLParts(ab.URL())
+	p.VersionID = versionId
+	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
+}
+
 func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
 	return ab.blobClient.GetAccountInfo(ctx)
 }
 
 // Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*AppendBlobCreateResponse, error) {
+func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*AppendBlobCreateResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
 	return ab.abClient.Create(ctx, 0, nil,
 		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
 		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
-		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, nil)
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
+		nil, // Blob ifTags
+		nil,
+		blobTagsString, // Blob tags
+	)
 }
 
 // AppendBlock writes a stream to a new block of data to the end of the existing append blob.
 // This method panics if the stream is not at position 0.
 // Note that the http client closes the body stream after the request is sent to the service.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
-func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockResponse, error) { +func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers() count, err := validateSeekableStreamAt0AndGetCount(body) @@ -68,21 +83,32 @@ func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac return nil, err } return ab.abClient.AppendBlock(ctx, body, count, nil, - transactionalMD5, ac.LeaseAccessConditions.pointers(), + transactionalMD5, + nil, // CRC + ac.LeaseAccessConditions.pointers(), ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. -func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte) (*AppendBlobAppendBlockFromURLResponse, error) { +func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockFromURLResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers() sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers() return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(), - transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(), + transactionalMD5, nil, nil, nil, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + destinationAccessConditions.LeaseAccessConditions.pointers(), ifMaxSizeLessThanOrEqual, ifAppendPositionEqual, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) } type AppendBlobAccessConditions struct { diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go index 6a9cb5e..6f453e6 100644 --- 
a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
@@ -2,9 +2,9 @@ package azblob
 
 import (
 	"context"
-	"net/url"
-
 	"github.com/Azure/azure-pipeline-go/pipeline"
+	"net/url"
+	"strings"
 )
 
 // A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
@@ -12,6 +12,11 @@ type BlobURL struct {
 	blobClient blobClient
 }
 
+type BlobTagsMap map[string]string
+
+var DefaultAccessTier AccessTierType = AccessTierNone
+var DefaultPremiumBlobAccessTier PremiumPageBlobAccessTierType = PremiumPageBlobAccessTierNone
+
 // NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
 func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
 	blobClient := newBlobClient(url, p)
@@ -46,6 +51,14 @@ func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
 	return NewBlobURL(p.URL(), b.blobClient.Pipeline())
 }
 
+// WithVersionID creates a new BlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version id returning a URL to the base blob.
+func (b BlobURL) WithVersionID(versionID string) BlobURL {
+	p := NewBlobURLParts(b.URL())
+	p.VersionID = versionID
+	return NewBlobURL(p.URL(), b.blobClient.Pipeline())
+}
+
 // ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
 func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
 	return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
@@ -61,19 +74,49 @@ func (b BlobURL) ToPageBlobURL() PageBlobURL {
 	return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
 }
 
-// DownloadBlob reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
+	if blobTagsMap == nil {
+		return nil
+	}
+	tags := make([]string, 0)
+	for key, val := range blobTagsMap {
+		tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+	}
+	blobTagsString := strings.Join(tags, "&")
+	return &blobTagsString
+}
+
+func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags {
+	if blobTagsMap == nil {
+		return BlobTags{}
+	}
+	blobTagSet := make([]BlobTag, 0, len(blobTagsMap))
+	for key, val := range blobTagsMap {
+		blobTagSet = append(blobTagSet, BlobTag{Key: key, Value: val})
+	}
+	return BlobTags{BlobTagSet: blobTagSet}
+}
+
+// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
 // Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string).
+// Therefore it is not required to pass these here.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
-func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool) (*DownloadResponse, error) {
+func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool, cpk ClientProvidedKeyOptions) (*DownloadResponse, error) {
 	var xRangeGetContentMD5 *bool
 	if rangeGetContentMD5 {
 		xRangeGetContentMD5 = &rangeGetContentMD5
 	}
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
-	dr, err := b.blobClient.Download(ctx, nil, nil,
+	dr, err := b.blobClient.Download(ctx, nil, nil, nil,
 		httpRange{offset: offset, count: count}.pointers(),
-		ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5,
-		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+		ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
 	if err != nil {
 		return nil, err
 	}
@@ -85,13 +128,33 @@ func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac Blo
 	}, err
 }
 
-// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
-// Note that deleting a blob also deletes all its snapshots.
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note 1: Deleting a blob also deletes all its snapshots.
+// Note 2: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string).
+// Therefore it is not required to pass these here.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
 func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
-	return b.blobClient.Delete(ctx, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
-		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+	return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+// Each call to this operation replaces all existing tags attached to the blob.
+// To remove all tags from the blob, call this operation with no tags set.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
+func (b BlobURL) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, blobTagsMap BlobTagsMap) (*BlobSetTagsResponse, error) {
+	tags := SerializeBlobTags(blobTagsMap)
+	return b.blobClient.SetTags(ctx, timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, &tags)
+}
+
+// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
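Illustrative sketch (not from the patch), assuming an existing blobURL and ctx, with azblob and fmt imported; GetTags, whose signature continues just below, reads the tags back as a BlobTagSet. The tag keys and values here are hypothetical.

	func tagAndList(ctx context.Context, blobURL azblob.BlobURL) error {
		tags := azblob.BlobTagsMap{"project": "metahugo", "env": "test"}
		if _, err := blobURL.SetTags(ctx, nil, nil, nil, nil, nil, nil, tags); err != nil {
			return err
		}
		got, err := blobURL.GetTags(ctx, nil, nil, nil, nil, nil)
		if err != nil {
			return err
		}
		for _, t := range got.BlobTagSet {
			fmt.Printf("%s=%s\n", t.Key, t.Value)
		}
		return nil
	}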
+// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
+func (b BlobURL) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string) (*BlobTags, error) {
+	return b.blobClient.GetTags(ctx, timeout, requestID, snapshot, versionID, ifTags)
 }
 
 // Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
@@ -100,50 +163,71 @@ func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
 	return b.blobClient.Undelete(ctx, nil, nil)
 }
 
-// SetTier operation sets the tier on a blob. The operation is allowed on a page
-// blob in a premium storage account and on a block blob in a blob storage account (locally
-// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
-// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
-// does not update the blob's ETag.
+// SetTier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
+// and on a block blob in a blob storage account (locally redundant storage only).
+// A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob.
+// A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag.
+// Note: VersionId is an optional parameter which is part of request URL query params.
+// It can be explicitly set by calling the WithVersionID(versionID string) function and hence it is not required to pass it here.
 // For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
 func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions) (*BlobSetTierResponse, error) {
-	return b.blobClient.SetTier(ctx, tier, nil, nil, lac.pointers())
+	return b.blobClient.SetTier(ctx, tier, nil,
+		nil, // Blob versioning
+		nil, RehydratePriorityNone, nil, lac.pointers())
 }
 
-// GetBlobProperties returns the blob's properties.
+// GetProperties returns the blob's properties.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string).
+// Therefore it is not required to pass these here.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
-func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions) (*BlobGetPropertiesResponse, error) {
+func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobGetPropertiesResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
-	return b.blobClient.GetProperties(ctx, nil, nil, ac.LeaseAccessConditions.pointers(),
-		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+	return b.blobClient.GetProperties(ctx, nil,
+		nil, // Blob versioning
+		nil, ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
 }
 
-// SetBlobHTTPHeaders changes a blob's HTTP headers.
+// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() return b.blobClient.SetHTTPHeaders(ctx, nil, &h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage, ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags &h.ContentDisposition, nil) } -// SetBlobMetadata changes a blob's metadata. +// SetMetadata changes a blob's metadata. // https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. -func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobSetMetadataResponse, error) { +func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobSetMetadataResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(), - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // CreateSnapshot creates a read-only snapshot of a blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. -func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions) (*BlobCreateSnapshotResponse, error) { +func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobCreateSnapshotResponse, error) { // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter // because checking this would be a performance hit for a VERY unusual path and I don't think the common case should suffer this // performance hit. ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() - return b.blobClient.CreateSnapshot(ctx, nil, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, ac.LeaseAccessConditions.pointers(), nil) + return b.blobClient.CreateSnapshot(ctx, nil, metadata, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + ac.LeaseAccessConditions.pointers(), nil) } // AcquireLease acquires a lease on the blob for write and delete operations. 
The lease duration must be between @@ -152,7 +236,9 @@ func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobA func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // RenewLease renews the blob's previously-acquired lease. @@ -160,7 +246,9 @@ func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration i func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.RenewLease(ctx, leaseID, nil, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // ReleaseLease releases the blob's previously-acquired lease. @@ -168,7 +256,9 @@ func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAcce func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.ReleaseLease(ctx, leaseID, nil, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) @@ -177,7 +267,9 @@ func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAc func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds), - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // ChangeLease changes the blob's lease ID. @@ -185,7 +277,9 @@ func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers() return b.blobClient.ChangeLease(ctx, leaseID, proposedID, - nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics. @@ -200,17 +294,22 @@ func leasePeriodPointer(period int32) (p *int32) { // StartCopyFromURL copies the data at the source URL to a blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. 
-func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions) (*BlobStartCopyFromURLResponse, error) {
+func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobStartCopyFromURLResponse, error) {
 	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
 	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
 	dstLeaseID := dstac.LeaseAccessConditions.pointers()
-
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
 	return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
-		srcIfModifiedSince, srcIfUnmodifiedSince,
+		tier, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
 		srcIfMatchETag, srcIfNoneMatchETag,
+		nil, // source ifTags
 		dstIfModifiedSince, dstIfUnmodifiedSince,
 		dstIfMatchETag, dstIfNoneMatchETag,
-		dstLeaseID, nil)
+		nil, // Blob ifTags
+		dstLeaseID,
+		nil,
+		blobTagsString, // Blob tags
+		nil)
 }
 
 // AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
index 184c07d..c47ed81 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
@@ -5,9 +5,6 @@ import (
 	"io"
 	"net/url"
 
-	"encoding/base64"
-	"encoding/binary"
-
 	"github.com/Azure/azure-pipeline-go/pipeline"
 )
 
@@ -16,7 +13,7 @@ const (
 	BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
 
 	// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
-	BlockBlobMaxStageBlockBytes = 100 * 1024 * 1024 // 100MB
+	BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4000MiB
 
 	// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
 	BlockBlobMaxBlocks = 50000
@@ -48,6 +45,14 @@ func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
 	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
 }
 
+// WithVersionID creates a new BlockBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version id returning a URL to the base blob.
+func (bb BlockBlobURL) WithVersionID(versionId string) BlockBlobURL {
+	p := NewBlobURLParts(bb.URL())
+	p.VersionID = versionId
+	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
+}
+
 func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
 	return bb.blobClient.GetAccountInfo(ctx)
 }
@@ -59,36 +64,48 @@ func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoR
 // This method panics if the stream is not at position 0.
 // Note that the http client closes the body stream after the request is sent to the service.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*BlockBlobUploadResponse, error) { +func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*BlockBlobUploadResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() count, err := validateSeekableStreamAt0AndGetCount(body) + blobTagsString := SerializeBlobTagsHeader(blobTagsMap) if err != nil { return nil, err } - return bb.bbClient.Upload(ctx, body, count, nil, + return bb.bbClient.Upload(ctx, body, count, nil, nil, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, - &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), - &h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, - nil) + &h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + tier, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil, + blobTagsString, // Blob tags + ) } // StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList. // Note that the http client closes the body stream after the request is sent to the service. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block. -func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte) (*BlockBlobStageBlockResponse, error) { +func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) { count, err := validateSeekableStreamAt0AndGetCount(body) if err != nil { return nil, err } - return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, ac.pointers(), nil) + return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(), + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + nil) } // StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList. // If count is CountToEnd (0), then data is read from specified offset to the end. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url. 
-func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*BlockBlobStageBlockFromURLResponse, error) { +func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockFromURLResponse, error) { sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() - return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) + return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) } // CommitBlockList writes a blob by specifying the list of block IDs that make up the blob. @@ -97,70 +114,46 @@ func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID stri // by uploading only those blocks that have changed, then committing the new and existing // blocks together. Any blocks not specified in the block list and permanently deleted. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list. -func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, - metadata Metadata, ac BlobAccessConditions) (*BlockBlobCommitBlockListResponse, error) { +func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*BlockBlobCommitBlockListResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + blobTagsString := SerializeBlobTagsHeader(blobTagsMap) return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil, - &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, + &h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + tier, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil, + blobTagsString, // Blob tags + ) } // GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. 
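Illustrative sketch (not from the patch), assuming "encoding/base64" and "strings" are imported alongside azblob: stage two blocks, then commit them with an access tier; tags and client-provided keys are left at their zero values.

	func stageAndCommit(ctx context.Context, bbURL azblob.BlockBlobURL) error {
		// Block IDs must be base64-encoded and all of equal length (hypothetical IDs).
		id1 := base64.StdEncoding.EncodeToString([]byte("block-0000"))
		id2 := base64.StdEncoding.EncodeToString([]byte("block-0001"))

		if _, err := bbURL.StageBlock(ctx, id1, strings.NewReader("hello "),
			azblob.LeaseAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}); err != nil {
			return err
		}
		if _, err := bbURL.StageBlock(ctx, id2, strings.NewReader("world"),
			azblob.LeaseAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{}); err != nil {
			return err
		}
		_, err := bbURL.CommitBlockList(ctx, []string{id1, id2}, azblob.BlobHTTPHeaders{},
			azblob.Metadata{}, azblob.BlobAccessConditions{}, azblob.AccessTierHot, nil,
			azblob.ClientProvidedKeyOptions{})
		return err
	}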
func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) { - return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), nil) + return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(), + nil, // Blob ifTags + nil) } -////////////////////////////////////////////////////////////////////////////////////////////////////////////// +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobCopyFromURLResponse, error) { -type BlockID [64]byte - -func (blockID BlockID) ToBase64() string { - return base64.StdEncoding.EncodeToString(blockID[:]) -} - -func (blockID *BlockID) FromBase64(s string) error { - *blockID = BlockID{} // Zero out the block ID - _, err := base64.StdEncoding.Decode(blockID[:], ([]byte)(s)) - return err -} - -////////////////////////////////////////////////////////////////////////////////////////////////////////////// - -type uuidBlockID BlockID - -func (ubi uuidBlockID) UUID() uuid { - u := uuid{} - copy(u[:], ubi[:len(u)]) - return u -} - -func (ubi uuidBlockID) Number() uint32 { - return binary.BigEndian.Uint32(ubi[len(uuid{}):]) -} - -func newUuidBlockID(u uuid) uuidBlockID { - ubi := uuidBlockID{} // Create a new uuidBlockID - copy(ubi[:len(u)], u[:]) // Copy the specified UUID into it - // Block number defaults to 0 - return ubi -} - -func (ubi *uuidBlockID) SetUUID(u uuid) *uuidBlockID { - copy(ubi[:len(u)], u[:]) - return ubi -} - -func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { - binary.BigEndian.PutUint32(ubi[len(uuid{}):], blockNumber) // Put block number after UUID - return ubi // Return the passed-in copy -} - -func (ubi uuidBlockID) ToBase64() string { - return BlockID(ubi).ToBase64() -} - -func (ubi *uuidBlockID) FromBase64(s string) error { - return (*BlockID)(ubi).FromBase64(s) + srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() + dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() + dstLeaseID := dstac.LeaseAccessConditions.pointers() + blobTagsString := SerializeBlobTagsHeader(blobTagsMap) + return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, tier, + srcIfModifiedSince, srcIfUnmodifiedSince, + srcIfMatchETag, srcIfNoneMatchETag, + dstIfModifiedSince, dstIfUnmodifiedSince, + dstIfMatchETag, dstIfNoneMatchETag, + nil, // Blob ifTags + dstLeaseID, nil, srcContentMD5, + blobTagsString, // Blob tags + nil, // seal Blob + ) } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go index 801239d..39fb5a1 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go @@ -84,7 +84,9 @@ func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL { // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. 
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
 func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) {
-	return c.client.Create(ctx, nil, metadata, publicAccessType, nil)
+	return c.client.Create(ctx, nil, metadata, publicAccessType, nil,
+		nil, nil, // container encryption
+	)
 }
 
 // Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
@@ -273,7 +275,7 @@ func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlob
 
 // BlobListingDetails indicates what additional information the service should return with each blob.
 type BlobListingDetails struct {
-	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted bool
+	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions bool
 }
 
 // string produces the Include query parameter's value.
@@ -295,5 +297,11 @@ func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType {
 	if d.UncommittedBlobs {
 		items = append(items, ListBlobsIncludeItemUncommittedblobs)
 	}
+	if d.Tags {
+		items = append(items, ListBlobsIncludeItemTags)
+	}
+	if d.Versions {
+		items = append(items, ListBlobsIncludeItemVersions)
+	}
 	return items
 }
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
index b74e1b4..d02eff4 100644
--- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go
@@ -14,7 +14,7 @@ const (
 	// PageBlobPageBytes indicates the number of bytes in a page (512).
 	PageBlobPageBytes = 512
 
-	// PageBlobMaxPutPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage.
+	// PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage.
 	PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB
 )
 
@@ -44,36 +44,55 @@ func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL {
 	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
 }
 
+// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version id returning a URL to the base blob.
+func (pb PageBlobURL) WithVersionID(versionId string) PageBlobURL {
+	p := NewBlobURLParts(pb.URL())
+	p.VersionID = versionId
+	return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline())
+}
+
 func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
 	return pb.blobClient.GetAccountInfo(ctx)
 }
 
-// Create creates a page blob of the specified length. Call PutPage to upload data data to a page blob.
+// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions) (*PageBlobCreateResponse, error) {
+func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier PremiumPageBlobAccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions) (*PageBlobCreateResponse, error) {
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
-	return pb.pbClient.Create(ctx, 0, size, nil,
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	return pb.pbClient.Create(ctx, 0, size, nil, tier,
 		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl,
-		metadata, ac.LeaseAccessConditions.pointers(),
-		&h.ContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, &sequenceNumber, nil)
+		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		&sequenceNumber, nil,
+		blobTagsString, // Blob tags
+	)
 }
 
 // UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
 // This method panics if the stream is not at position 0.
 // Note that the http client closes the body stream after the request is sent to the service.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte) (*PageBlobUploadPagesResponse, error) {
+func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesResponse, error) {
 	count, err := validateSeekableStreamAt0AndGetCount(body)
 	if err != nil {
 		return nil, err
 	}
 	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
 	ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers()
-	return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil,
+	return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil,
 		PageRange{Start: offset, End: offset + count - 1}.pointers(),
 		ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
 		ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual,
-		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil)
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
 }
 
 // UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
@@ -81,24 +100,31 @@ func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.Rea
 // The destOffset specifies the start offset of data in page blob will be written to.
 // The count must be a multiple of 512 bytes.
 // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
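Illustrative sketch (not from the patch), assuming "bytes" is imported alongside azblob: create a small page blob using the Create/UploadPages signatures above with the default premium-tier placeholder, then write one 512-byte page; offsets and lengths must be multiples of PageBlobPageBytes.

	func writeFirstPage(ctx context.Context, pbURL azblob.PageBlobURL) error {
		if _, err := pbURL.Create(ctx, 4096, 0, azblob.BlobHTTPHeaders{}, azblob.Metadata{},
			azblob.BlobAccessConditions{}, azblob.DefaultPremiumBlobAccessTier, nil,
			azblob.ClientProvidedKeyOptions{}); err != nil {
			return err
		}
		page := make([]byte, azblob.PageBlobPageBytes) // one zeroed 512-byte page
		_, err := pbURL.UploadPages(ctx, 0, bytes.NewReader(page),
			azblob.PageBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{})
		return err
	}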
-func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions) (*PageBlobUploadPagesFromURLResponse, error) { +func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesFromURLResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers() sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers() return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0, - *PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, destinationAccessConditions.LeaseAccessConditions.pointers(), + *PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + destinationAccessConditions.LeaseAccessConditions.pointers(), ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil) } // ClearPages frees the specified pages from the page blob. // For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. 
-func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions) (*PageBlobClearPagesResponse, error) { +func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobClearPagesResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() return pb.pbClient.ClearPages(ctx, 0, nil, PageRange{Start: offset, End: offset + count - 1}.pointers(), ac.LeaseAccessConditions.pointers(), + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) } @@ -110,7 +136,23 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int return pb.pbClient.GetPageRanges(ctx, nil, nil, httpRange{offset: offset, count: count}.pointers(), ac.LeaseAccessConditions.pointers(), - ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) +} + +// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob representing managed disk. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + + return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot, + prevSnapshotURL, // Get managed disk diff + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) } // GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. @@ -118,21 +160,25 @@ func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot, + nil, // Get managed disk diff httpRange{offset: offset, count: count}.pointers(), ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags nil) } // Resize resizes the page blob to the specified size (which must be a multiple of 512). // For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. 
-func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions) (*PageBlobResizeResponse, error) { +func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobResizeResponse, error) { ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(), + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil) } -// SetSequenceNumber sets the page blob's sequence number. +// UpdateSequenceNumber sets the page blob's sequence number. func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64, ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) { sn := &sequenceNumber @@ -145,7 +191,7 @@ func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceN sn, nil) } -// StartIncrementalCopy begins an operation to start an incremental copy from one page blob's snapshot to this page blob. +// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob. // The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. // The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. // For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go index cd62618..2d75678 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go @@ -4,6 +4,7 @@ import ( "context" "net/url" "strings" + "time" "github.com/Azure/azure-pipeline-go/pipeline" ) @@ -38,6 +39,15 @@ func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInf return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil } +//TODO this was supposed to be generated +//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion +func NewKeyInfo(Start, Expiry time.Time) KeyInfo { + return KeyInfo{ + Start: Start.UTC().Format(SASTimeFormat), + Expiry: Expiry.UTC().Format(SASTimeFormat), + } +} + func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { return s.client.GetAccountInfo(ctx) } @@ -106,14 +116,14 @@ type ListContainersSegmentOptions struct { // TODO: update swagger to generate this type? 
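The hand-written NewKeyInfo helper above pairs with GetUserDelegationCredential so callers can request a user delegation key for a bounded window without hand-formatting SAS times. A minimal sketch, assuming the trailing timeout and request-ID parameters of GetUserDelegationCredential remain optional (nil):

    func delegationCredential(ctx context.Context, serviceURL azblob.ServiceURL) (azblob.UserDelegationCredential, error) {
        // Key valid from now until 48 hours from now; NewKeyInfo handles the UTC/SAS formatting.
        keyInfo := azblob.NewKeyInfo(time.Now().UTC(), time.Now().UTC().Add(48*time.Hour))
        return serviceURL.GetUserDelegationCredential(ctx, keyInfo, nil, nil)
    }

The returned credential can then stand in for a shared-key credential when signing blob SAS values.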
} -func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListContainersIncludeType, maxResults *int32) { +func (o *ListContainersSegmentOptions) pointers() (prefix *string, include []ListContainersIncludeType, maxResults *int32) { if o.Prefix != "" { prefix = &o.Prefix } if o.MaxResults != 0 { maxResults = &o.MaxResults } - include = ListContainersIncludeType(o.Detail.string()) + include = []ListContainersIncludeType{ListContainersIncludeType(o.Detail.string())} return } @@ -121,15 +131,21 @@ func (o *ListContainersSegmentOptions) pointers() (prefix *string, include ListC type ListContainersDetail struct { // Tells the service whether to return metadata for each container. Metadata bool + + // Show containers that have been deleted when the soft-delete feature is enabled. + // Deleted bool } // string produces the Include query parameter's value. func (d *ListContainersDetail) string() string { - items := make([]string, 0, 1) + items := make([]string, 0, 2) // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! if d.Metadata { items = append(items, string(ListContainersIncludeMetadata)) } + // if d.Deleted { + // items = append(items, string(ListContainersIncludeDeleted)) + // } if len(items) > 0 { return strings.Join(items, ",") } @@ -147,3 +163,12 @@ func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServi func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) { return bsu.client.GetStatistics(ctx, nil, nil) } + +// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression. +// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags +// eg. "dog='germanshepherd' and penguin='emperorpenguin'" +// To specify a container, eg. 
"@container=’containerName’ and Name = ‘C’" +func (bsu ServiceURL) FindBlobsByTags(ctx context.Context, timeout *int32, requestID *string, where *string, marker Marker, maxResults *int32) (*FilterBlobSegment, error) { + return bsu.client.FilterBlobs(ctx, timeout, requestID, where, marker.Val, maxResults) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go index 7095516..287e1e4 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go @@ -1,3 +1,3 @@ package azblob -const serviceLibVersion = "0.9" +const serviceLibVersion = "0.13" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go deleted file mode 100644 index 3e8c7cb..0000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build linux darwin freebsd openbsd netbsd dragonfly - -package azblob - -import ( - "os" - "syscall" -) - -type mmf []byte - -func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { - prot, flags := syscall.PROT_READ, syscall.MAP_SHARED // Assume read-only - if writable { - prot, flags = syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED - } - addr, err := syscall.Mmap(int(file.Fd()), offset, length, prot, flags) - return mmf(addr), err -} - -func (m *mmf) unmap() { - err := syscall.Munmap(*m) - *m = nil - if err != nil { - panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") - } -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go deleted file mode 100644 index 2743644..0000000 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_mmf_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -package azblob - -import ( - "os" - "reflect" - "syscall" - "unsafe" -) - -type mmf []byte - -func newMMF(file *os.File, writable bool, offset int64, length int) (mmf, error) { - prot, access := uint32(syscall.PAGE_READONLY), uint32(syscall.FILE_MAP_READ) // Assume read-only - if writable { - prot, access = uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) - } - hMMF, errno := syscall.CreateFileMapping(syscall.Handle(file.Fd()), nil, prot, uint32(int64(length)>>32), uint32(int64(length)&0xffffffff), nil) - if hMMF == 0 { - return nil, os.NewSyscallError("CreateFileMapping", errno) - } - defer syscall.CloseHandle(hMMF) - addr, errno := syscall.MapViewOfFile(hMMF, access, uint32(offset>>32), uint32(offset&0xffffffff), uintptr(length)) - m := mmf{} - h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) - h.Data = addr - h.Len = length - h.Cap = h.Len - return m, nil -} - -func (m *mmf) unmap() { - addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) - *m = mmf{} - err := syscall.UnmapViewOfFile(addr) - if err != nil { - panic("if we are unable to unmap the memory-mapped file, there is serious concern for memory corruption") - } -} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go index 7c249a2..ba99255 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go @@ -41,6 +41,5 @@ func NewPipeline(c Credential, o 
PipelineOptions) pipeline.Pipeline { NewRequestLogPolicyFactory(o.RequestLog), pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked - return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log}) } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go index 0a362ea..29a99a8 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go @@ -62,15 +62,21 @@ func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory { logLevel, forceLog = pipeline.LogWarning, true } - if err == nil { // We got a response from the service - sc := response.Response().StatusCode - if ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { - logLevel, forceLog = pipeline.LogError, true // Promote to Error any 4xx (except those listed is an error) or any 5xx - } else { - // For other status codes, we leave the level as is. + var sc int + if err == nil { // We got a valid response from the service + sc = response.Response().StatusCode + } else { // We got an error, so we should inspect if we got a response + if se, ok := err.(StorageError); ok { + if r := se.Response(); r != nil { + sc = r.StatusCode + } } - } else { // This error did not get an HTTP response from the service; upgrade the severity to Error - logLevel, forceLog = pipeline.LogError, true + } + + if sc == 0 || ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict && sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) { + logLevel, forceLog = pipeline.LogError, true // Promote to Error any 5xx, any 4xx except those excluded above, or any request that got no HTTP status code at all (sc == 0) + } else { + // For other status codes, we leave the level as is.
} if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go index 00531fe..0894fcc 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go @@ -240,6 +240,8 @@ func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory { } else { action = "NoRetry: net.Error and in the non-retriable list" } + } else if err == io.ErrUnexpectedEOF { + action = "Retry: unexpected EOF" } else { action = "NoRetry: unrecognized error" } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go index a75c7d1..db8cee7 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go @@ -2,7 +2,7 @@ package azblob import ( "context" - + "errors" "github.com/Azure/azure-pipeline-go/pipeline" ) @@ -14,9 +14,22 @@ func NewUniqueRequestIDPolicyFactory() pipeline.Factory { return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { id := request.Header.Get(xMsClientRequestID) if id == "" { // Add a unique request ID if the caller didn't specify one already - request.Header.Set(xMsClientRequestID, newUUID().String()) + id = newUUID().String() + request.Header.Set(xMsClientRequestID, id) } - return next.Do(ctx, request) + + resp, err := next.Do(ctx, request) + + if err == nil && resp != nil { + val := resp.Response().Header.Values(xMsClientRequestID) + if len(val) > 0 { + if val[0] != id { + err = errors.New("client Request ID from request and response does not match") + } + } + } + + return resp, err } }) } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go index 3247aca..ad38f59 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go @@ -41,6 +41,7 @@ type RetryReaderOptions struct { MaxRetryRequests int doInjectError bool doInjectErrorRound int + injectedError error // NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging. NotifyFailedRead FailedReadNotifier @@ -55,6 +56,8 @@ type RetryReaderOptions struct { // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors // which will be retried. TreatEarlyCloseAsError bool + + ClientProvidedKeyOptions ClientProvidedKeyOptions } // retryReader implements io.ReaderCloser methods. @@ -117,7 +120,11 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // Injection mechanism for testing. if s.o.doInjectError && try == s.o.doInjectErrorRound { - err = &net.DNSError{IsTemporary: true} + if s.o.injectedError != nil { + err = s.o.injectedError + } else { + err = &net.DNSError{IsTemporary: true} + } } // We successfully read data or end EOF. @@ -134,7 +141,8 @@ func (s *retryReader) Read(p []byte) (n int, err error) { // Check the retry count and error code, and decide whether to retry. 
retriesExhausted := try >= s.o.MaxRetryRequests _, isNetError := err.(net.Error) - willRetry := (isNetError || s.wasRetryableEarlyClose(err)) && !retriesExhausted + isUnexpectedEOF := err == io.ErrUnexpectedEOF + willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted // Notify, for logging purposes, of any failures if s.o.NotifyFailedRead != nil { diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go index c000c48..3010a6a 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go @@ -76,7 +76,7 @@ func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Sh // The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. // Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. type AccountSASPermissions struct { - Read, Write, Delete, List, Add, Create, Update, Process bool + Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool } // String produces the SAS permissions string for an Azure Storage account. @@ -92,6 +92,9 @@ func (p AccountSASPermissions) String() string { if p.Delete { buffer.WriteRune('d') } + if p.DeletePreviousVersion { + buffer.WriteRune('x') + } if p.List { buffer.WriteRune('l') } @@ -107,6 +110,12 @@ func (p AccountSASPermissions) String() string { if p.Process { buffer.WriteRune('p') } + if p.Tag { + buffer.WriteRune('t') + } + if p.FilterByTags { + buffer.WriteRune('f') + } return buffer.String() } @@ -131,8 +140,14 @@ func (p *AccountSASPermissions) Parse(s string) error { p.Update = true case 'p': p.Process = true + case 'x': + p.DeletePreviousVersion = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true default: - return fmt.Errorf("Invalid permission character: '%v'", r) + return fmt.Errorf("invalid permission character: '%v'", r) } } return nil diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go index 11b1b2b..f87ef2b 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go @@ -1,6 +1,7 @@ package azblob import ( + "errors" "net" "net/url" "strings" @@ -25,11 +26,11 @@ const ( func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) { ss := "" if !startTime.IsZero() { - ss = startTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" + ss = formatSASTimeWithDefaultFormat(&startTime) } se := "" if !expiryTime.IsZero() { - se = expiryTime.Format(SASTimeFormat) // "yyyy-MM-ddTHH:mm:ssZ" + se = formatSASTimeWithDefaultFormat(&expiryTime) } sh := "" if !snapshotTime.IsZero() { @@ -39,7 +40,38 @@ func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (st } // SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
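The expanded AccountSASPermissions above covers the newer x (delete previous version), t (tag) and f (filter by tags) permission letters. A small round-trip sketch; String emits the letters in the fixed order shown above, and Parse rebuilds the struct:

    perms := azblob.AccountSASPermissions{Read: true, List: true, Tag: true, FilterByTags: true}
    s := perms.String() // "rltf"
    var parsed azblob.AccountSASPermissions
    if err := parsed.Parse(s); err != nil { // round-trips as long as only known letters appear
        return err
    }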
-const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601 +const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601 +var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details. + +// formatSASTimeWithDefaultFormat formats a time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ". +func formatSASTimeWithDefaultFormat(t *time.Time) string { + return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used +} + +// formatSASTime formats a time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default. +func formatSASTime(t *time.Time, format string) string { + if format != "" { + return t.Format(format) + } + return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used +} + +// parseSASTimeString tries to parse a SAS time string against each supported ISO 8601 layout. +func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) { + for _, sasTimeFormat := range SASTimeFormats { + t, err = time.Parse(sasTimeFormat, val) + if err == nil { + timeFormat = sasTimeFormat + break + } + } + + if err != nil { + err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details") + } + + return +} // https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas @@ -74,6 +106,10 @@ type SASQueryParameters struct { signedExpiry time.Time `param:"ske"` signedService string `param:"sks"` signedVersion string `param:"skv"` + + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string } func (p *SASQueryParameters) SignedOid() string { @@ -202,9 +238,9 @@ func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool case "snapshot": p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val) case "st": - p.startTime, _ = time.Parse(SASTimeFormat, val) + p.startTime, p.stTimeFormat, _ = parseSASTimeString(val) case "se": - p.expiryTime, _ = time.Parse(SASTimeFormat, val) + p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val) case "sip": dashIndex := strings.Index(val, "-") if dashIndex == -1 { @@ -268,10 +304,10 @@ func (p *SASQueryParameters) addToValues(v url.Values) url.Values { v.Add("spr", string(p.protocol)) } if !p.startTime.IsZero() { - v.Add("st", p.startTime.Format(SASTimeFormat)) + v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) } if !p.expiryTime.IsZero() { - v.Add("se", p.expiryTime.Format(SASTimeFormat)) + v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) } if len(p.ipRange.Start) > 0 { v.Add("sip", p.ipRange.String()) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go index 765beb2..d09ddcf 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go @@ -114,6 +114,9 @@ const ( // ServiceCodeResourceNotFound means the specified resource does not exist (404). ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" + // ServiceCodeNoAuthenticationInformation means the specified authentication for the resource does not exist (401).
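SAS tokens in the wild carry start and expiry times in several ISO 8601 shapes, so parseSASTimeString above records which layout matched; addToValues then re-emits the value in the same layout, keeping the query string byte-identical to what was signed. An in-package illustration of the accepted layouts:

    for _, v := range []string{"2021-04-18T12:25:49.0000000Z", "2021-04-18T12:25:49Z", "2021-04-18T12:25Z", "2021-04-18"} {
        t, layout, err := parseSASTimeString(v) // package-private, shown for illustration
        fmt.Println(t.UTC(), layout, err)       // each value matches exactly one SASTimeFormats entry
    }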
+	ServiceCodeNoAuthenticationInformation ServiceCodeType = "NoAuthenticationInformation" + // ServiceCodeServerBusy means the server is currently unable to receive requests (503). Retry the request; this can also mean that ingress/egress or operations per second are over the account limit. ServiceCodeServerBusy ServiceCodeType = "ServerBusy" diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go index e7872a8..a3cbd98 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go @@ -79,7 +79,7 @@ func (e *storageError) Error() string { // Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500, 502 or 503). func (e *storageError) Temporary() bool { if e.response != nil { - if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) { + if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) { return true } } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go index 719bcb6..cb92f7e 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go @@ -34,20 +34,30 @@ func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient { // information, see Setting // Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to -// be validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active -// and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. -// If the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than -// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code -// 412 - Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. -// A number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to -// this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 -// - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been -// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if -// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs -// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled.
-func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockResponse, error) { +// be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be +// validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If +// the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the +// value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - +// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A +// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this +// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - +// Precondition Failed). encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in +// the request. If not specified, encryption is performed with the root account encryption key. For more information, +// see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided +// encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm +// used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the +// x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the +// name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
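The regenerated AppendBlock below gains a transactional CRC64, the four encryption (CPK) parameters, and the ifTags predicate. At the wrapper level this release surfaces the change roughly as an extra ClientProvidedKeyOptions argument; a hedged sketch, assuming AppendBlobURL.AppendBlock in url_append_blob.go now reads (ctx, body, access conditions, transactional MD5, cpk):

    // Assumed v0.13 wrapper shape; verify against url_append_blob.go in this patch.
    _, err := abURL.AppendBlock(ctx, bytes.NewReader(data),
        azblob.AppendBlobAccessConditions{}, nil /*transactional MD5*/, cpk)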
+func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*AppendBlobAppendBlockResponse, error) { if err := validate([]validation{ {targetValue: body, constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, @@ -56,7 +66,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -68,7 +78,7 @@ func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeek } // appendBlockPreparer prepares the AppendBlock request. -func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, body) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -83,6 +93,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe if transactionalContentMD5 != nil { req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } @@ -92,6 +105,18 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe if appendPosition != nil { req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + 
if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -104,6 +129,9 @@ func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLe if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -128,33 +156,44 @@ func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pip // // sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of // source data in the specified range. sourceContentMD5 is specify the md5 calculated for the range of bytes that must -// be read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. maxSize is optional conditional header. The max length in bytes permitted for -// the append blob. If the Append Block operation would cause the blob to exceed that limit or if the blob size is -// already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error -// (HTTP status code 412 - Precondition Failed). appendPosition is optional conditional header, used only for the -// Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only if the append -// position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error -// (HTTP status code 412 - Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob -// if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate -// only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to -// operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified -// since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it -// has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs -// with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) { +// Timeouts for Blob Service Operations. 
transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. encryptionKey is optional. Specifies the encryption key to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the root account encryption key. For more +// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this +// ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If the Append +// Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value +// specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - +// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A +// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this +// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - +// Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been modified +// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has +// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a +// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is +// specify a SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is +// specify this header value to operate only on a blob if it has been modified since the specified date/time. +// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. +// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides +// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. 
+func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*AppendBlobAppendBlockFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, timeout, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) + req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) if err != nil { return nil, err } @@ -166,7 +205,7 @@ func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL } // appendBlockFromURLPreparer prepares the AppendBlockFromURL request. 
-func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -184,7 +223,25 @@ func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, cont if sourceContentMD5 != nil { req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } @@ -206,6 +263,9 @@ func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, cont if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -255,20 +315,29 @@ func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Respons // metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and // Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is // active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. -// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. 
-// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*AppendBlobCreateResponse, error) { +// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. ifModifiedSince +// is specify this header value to operate only on a blob if it has been modified since the specified date/time. +// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is +// specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on +// blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, opaque value +// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// blobTagsString is optional. Used to set blob tags in various blob operations. 
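Create below also gains blobTagsString, which travels as the x-ms-tags header. The service documents that header as a URL-encoded query string, so the stdlib url.Values is enough to build it; a sketch with hypothetical tag names:

    tags := url.Values{}
    tags.Set("project", "metahugo")
    tags.Set("env", "prod")
    blobTagsString := tags.Encode() // "env=prod&project=metahugo", sent as x-ms-tags
    _ = blobTagsString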
+func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*AppendBlobCreateResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString) if err != nil { return nil, err } @@ -280,7 +349,7 @@ func (client appendBlobClient) Create(ctx context.Context, contentLength int64, } // createPreparer prepares the Create request. -func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -317,6 +386,18 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3 if blobContentDisposition != nil { req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", 
(*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -329,10 +410,16 @@ func (client appendBlobClient) createPreparer(contentLength int64, timeout *int3 if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } req.Header.Set("x-ms-blob-type", "AppendBlob") return req, nil } @@ -347,3 +434,84 @@ func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline resp.Response().Body.Close() return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err } + +// Seal the Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 +// version or later. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if +// specified, the operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is +// specify this header value to operate only on a blob if it has been modified since the specified date/time. +// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is +// specify an ETag value to operate only on blobs without a matching value. appendPosition is optional conditional +// header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will +// succeed only if the append position is equal to this number. If it is not, the request will fail with the +// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). +func (client appendBlobClient) Seal(ctx context.Context, timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (*AppendBlobSealResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.sealPreparer(timeout, requestID, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, appendPosition) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.sealResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobSealResponse), err +} + +// sealPreparer prepares the Seal request. 
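Seal is new in service version 2019-12-12 and simply PUTs ?comp=seal with the usual conditional headers, as the preparer below shows. An in-package sketch of an unconditional seal (every optional parameter nil):

    _, err := client.Seal(ctx, nil /*timeout*/, nil /*requestID*/, nil /*leaseID*/,
        nil, nil, nil, nil /*if-conditions*/, nil /*appendPosition*/)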
+func (client appendBlobClient) sealPreparer(timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "seal") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + return req, nil +} + +// sealResponder handles the response to the Seal request. +func (client appendBlobClient) sealResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobSealResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go index 5e30263..036bbfc 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go @@ -4,8 +4,10 @@ package azblob // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( + "bytes" "context" "encoding/base64" + "encoding/xml" "github.com/Azure/azure-pipeline-go/pipeline" "io" "io/ioutil" @@ -100,16 +102,17 @@ func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipe // blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is -// recorded in the analytics logs when storage analytics logging is enabled. -func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobAcquireLeaseResponse, error) { +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
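Every regenerated lease operation in this file takes the new ifTags parameter, an x-ms-if-tags SQL predicate evaluated against the blob's tags. An in-package sketch against the generated client (the public wrappers sit outside this hunk); a duration of -1 requests an infinite lease:

    where := "\"env\" = 'prod'" // only acquire if the blob's tags satisfy this predicate
    duration := int32(-1)
    _, err := client.AcquireLease(ctx, nil /*timeout*/, &duration, nil /*proposedLeaseID*/,
        nil, nil, nil, nil /*if-conditions*/, &where /*ifTags*/, nil /*requestID*/)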
+func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobAcquireLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -121,7 +124,7 @@ func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, durat } // acquireLeasePreparer prepares the AcquireLease request. -func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -150,6 +153,9 @@ func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, p if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -183,16 +189,17 @@ func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline // been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a // blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on // blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobBreakLeaseResponse, error) { +// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is +// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. 
+func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobBreakLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -204,7 +211,7 @@ func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPe } // breakLeasePreparer prepares the BreakLease request. -func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -230,6 +237,9 @@ func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -261,16 +271,17 @@ func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.R // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded -// in the analytics logs when storage analytics logging is enabled. -func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobChangeLeaseResponse, error) { +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. 
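// Illustrative sketch (hypothetical): ChangeLease rotates the client-supplied lease ID
// without giving up the lock. Both IDs are example GUIDs; the trailing nils leave the
// timeout, access conditions, ifTags, and requestID unset.
//func exampleRotateLease(ctx context.Context, client blobClient, current string) error {
//	next := "7b5bee43-7f50-4bba-9e9a-1f4a3e5b2c01"
//	_, err := client.ChangeLease(ctx, current, next, nil, nil, nil, nil, nil, nil, nil)
//	return err
//}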
+func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobChangeLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -282,7 +293,7 @@ func (client blobClient) ChangeLease(ctx context.Context, leaseID string, propos } // changeLeasePreparer prepares the ChangeLease request. -func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -307,6 +318,9 @@ func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID str if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -339,25 +353,29 @@ func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline. // file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. sourceIfModifiedSince is specify this header value to operate -// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header -// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify -// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate -// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it -// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on -// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. 
-// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCopyFromURLResponse, error) { +// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. +// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not +// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a +// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be read from the copy +// source. blobTagsString is optional. Used to set blob tags in various blob operations. sealBlob is overrides the +// sealed state of the destination blob. Service version 2019-12-12 and newer. 
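// Illustrative sketch (hypothetical): a synchronous server-side copy exercising the
// newly threaded parameters: destination tier, a source-content MD5 check, destination
// tags, and seal state. The URL, tag string, and digest are example values, and
// AccessTierCool is assumed from the generated models file (not shown here).
//func exampleCopyWithTags(ctx context.Context, client blobClient, srcMD5 []byte) error {
//	src := "https://account.blob.core.windows.net/src/blob1"
//	tags := "env=prod&team=web" // becomes the x-ms-tags header
//	seal := false               // leave the destination unsealed
//	_, err := client.CopyFromURL(ctx, src, nil, nil, AccessTierCool,
//		nil, nil, nil, nil, // source access conditions
//		nil, nil, nil, nil, // destination access conditions
//		nil, nil, nil,      // ifTags, leaseID, requestID
//		srcMD5, &tags, &seal)
//	return err
//}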
+func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, sealBlob *bool) (*BlobCopyFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.copyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID) + req, err := client.copyFromURLPreparer(copySource, timeout, metadata, tier, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, sourceContentMD5, blobTagsString, sealBlob) if err != nil { return nil, err } @@ -369,7 +387,7 @@ func (client blobClient) CopyFromURL(ctx context.Context, copySource string, tim } // copyFromURLPreparer prepares the CopyFromURL request. -func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { +func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, sealBlob *bool) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -384,6 +402,9 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, req.Header.Set("x-ms-meta-"+k, v) } } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -408,6 +429,9 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-copy-source", copySource) if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) @@ -416,6 +440,15 @@ func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if blobTagsString != nil { + 
req.Header.Set("x-ms-tags", *blobTagsString) + } + if sealBlob != nil { + req.Header.Set("x-ms-seal-blob", strconv.FormatBool(*sealBlob)) + } req.Header.Set("x-ms-requires-sync", "true") return req, nil } @@ -440,21 +473,30 @@ func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline. // file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only -// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// Containers, Blobs, and Metadata for more information. encryptionKey is optional. Specifies the encryption key to use +// to encrypt the data provided in the request. If not specified, encryption is performed with the root account +// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the +// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. +// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is +// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version +// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the +// request. If not specified, encryption is performed with the default account encryption scope. For more information, +// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and -// matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded -// in the analytics logs when storage analytics logging is enabled. -func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. 
+func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.createSnapshotPreparer(timeout, metadata, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID) + req, err := client.createSnapshotPreparer(timeout, metadata, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID) if err != nil { return nil, err } @@ -466,7 +508,7 @@ func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, met } // createSnapshotPreparer prepares the CreateSnapshot request. -func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { +func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -482,6 +524,18 @@ func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[str req.Header.Set("x-ms-meta-"+k, v) } } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -494,6 +548,9 @@ func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[str if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } @@ -529,7 +586,9 @@ func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeli // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. 
It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's // lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one @@ -538,16 +597,17 @@ func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeli // been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a // blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on // blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobDeleteResponse, error) { +// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is +// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. +func (client blobClient) Delete(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobDeleteResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.deletePreparer(snapshot, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.deletePreparer(snapshot, versionID, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -559,7 +619,7 @@ func (client blobClient) Delete(ctx context.Context, snapshot *string, timeout * } // deletePreparer prepares the Delete request. 
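// Illustrative sketch (hypothetical): deleting one specific version of a blob via the
// new versionid query parameter. The version string is an example of the opaque
// DateTime stamps the service returns on versioned writes; DeleteSnapshotsOptionNone
// is assumed from the generated models file (not shown here).
//func exampleDeleteVersion(ctx context.Context, client blobClient) error {
//	versionID := "2020-02-10T21:34:38.5207104Z"
//	_, err := client.Delete(ctx, nil, &versionID, nil, nil, DeleteSnapshotsOptionNone,
//		nil, nil, nil, nil, nil, nil)
//	return err
//}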
-func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) deletePreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("DELETE", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -568,6 +628,9 @@ func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseI if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } @@ -590,6 +653,9 @@ func (client blobClient) deletePreparer(snapshot *string, timeout *int32, leaseI if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -614,25 +680,35 @@ func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Respo // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. // rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for -// the range, as long as the range is less than or equal to 4 MB in size. ifModifiedSince is specify this header value -// to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this -// header value to operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify -// an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only -// on blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character -// limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client blobClient) Download(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*downloadResponse, error) { +// the range, as long as the range is less than or equal to 4 MB in size. 
rangeGetContentCRC64 is when set to true and +// specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less +// than or equal to 4 MB in size. encryptionKey is optional. Specifies the encryption key to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the root account encryption key. For more +// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) Download(ctx context.Context, snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*downloadResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.downloadPreparer(snapshot, timeout, rangeParameter, leaseID, rangeGetContentMD5, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.downloadPreparer(snapshot, versionID, timeout, rangeParameter, leaseID, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -644,7 +720,7 @@ func (client blobClient) Download(ctx context.Context, snapshot *string, timeout } // downloadPreparer prepares the Download request. 
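// Illustrative sketch (hypothetical): a ranged read that asks the service to return a
// CRC64 for the range via the new rangeGetContentCRC64 flag (valid for ranges of at
// most 4 MB). The range literal is an example; EncryptionAlgorithmNone leaves the
// customer-provided-key headers unset.
//func exampleRangedDownload(ctx context.Context, client blobClient) (*downloadResponse, error) {
//	rng := "bytes=0-1048575" // first MiB
//	wantCRC := true
//	return client.Download(ctx, nil, nil, nil, &rng, nil,
//		nil, &wantCRC,                     // rangeGetContentMD5, rangeGetContentCRC64
//		nil, nil, EncryptionAlgorithmNone, // customer-provided key triple
//		nil, nil, nil, nil, nil, nil)      // access conditions, ifTags, requestID
//}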
-func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) downloadPreparer(snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -653,6 +729,9 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } @@ -666,6 +745,18 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang if rangeGetContentMD5 != nil { req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5)) } + if rangeGetContentCRC64 != nil { + req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*rangeGetContentCRC64)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -678,6 +769,9 @@ func (client blobClient) downloadPreparer(snapshot *string, timeout *int32, rang if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -694,6 +788,86 @@ func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Res return &downloadResponse{rawResponse: resp.Response()}, err } +// GetAccessControl get the owner, group, permissions, or access control list for a blob. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. upn is optional. Valid only when Hierarchical Namespace is enabled for the +// account. If "true", the identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will +// be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be +// returned as Azure Active Directory Object IDs. The default value is false. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. 
ifModifiedSince is specify this header value to operate only on a blob if it has been modified since +// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) GetAccessControl(ctx context.Context, timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobGetAccessControlResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getAccessControlPreparer(timeout, upn, leaseID, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessControlResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetAccessControlResponse), err +} + +// getAccessControlPreparer prepares the GetAccessControl request. +func (client blobClient) getAccessControlPreparer(timeout *int32, upn *bool, leaseID *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("HEAD", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if upn != nil { + params.Set("upn", strconv.FormatBool(*upn)) + } + params.Set("action", "getAccessControl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccessControlResponder handles the response to the GetAccessControl request. 
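// Illustrative sketch (hypothetical): reading the access control list for a blob on a
// Hierarchical-Namespace account, with upn=true so the x-ms-owner, x-ms-group, and
// x-ms-acl response headers carry User Principal Names instead of AAD object IDs.
//func exampleGetACL(ctx context.Context, client blobClient) (*BlobGetAccessControlResponse, error) {
//	upn := true
//	return client.GetAccessControl(ctx, nil, &upn, nil, nil, nil, nil, nil, nil)
//}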
+func (client blobClient) getAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetAccessControlResponse{rawResponse: resp.Response()}, err +} + // GetAccountInfo returns the sku name and account kind func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { req, err := client.getAccountInfoPreparer() @@ -738,23 +912,31 @@ func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeli // snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to // retrieve. For more information on working with blob snapshots, see Creating -// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it -// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on -// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) GetProperties(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobGetPropertiesResponse, error) { +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. 
requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) GetProperties(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobGetPropertiesResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.getPropertiesPreparer(snapshot, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.getPropertiesPreparer(snapshot, versionID, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -766,7 +948,7 @@ func (client blobClient) GetProperties(ctx context.Context, snapshot *string, ti } // getPropertiesPreparer prepares the GetProperties request. -func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) getPropertiesPreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("HEAD", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -775,6 +957,9 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, if snapshot != nil && len(*snapshot) > 0 { params.Set("snapshot", *snapshot) } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } @@ -782,6 +967,15 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -794,6 +988,9 @@ func (client blobClient) getPropertiesPreparer(snapshot *string, timeout *int32, if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -812,6 +1009,191 @@ func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipelin return 
&BlobGetPropertiesResponse{rawResponse: resp.Response()}, err } +// GetTags the Get Tags operation enables users to get the tags associated with a blob. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. snapshot is the +// snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more +// information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. +func (client blobClient) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string) (*BlobTags, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getTagsPreparer(timeout, requestID, snapshot, versionID, ifTags) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getTagsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobTags), err +} + +// getTagsPreparer prepares the GetTags request. +func (client blobClient) getTagsPreparer(timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + params.Set("comp", "tags") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + return req, nil +} + +// getTagsResponder handles the response to the GetTags request. +func (client blobClient) getTagsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &BlobTags{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// TODO funky quick query code +//// Query the Query operation enables users to select/project on blob data by providing simple query expressions. 
+//// +//// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +//// retrieve. For more information on working with blob snapshots, see Creating +//// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +//// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +//// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +//// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +//// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +//// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +//// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +//// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a +//// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +//// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +//// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +//// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +//// recorded in the analytics logs when storage analytics logging is enabled. +//func (client blobClient) Query(ctx context.Context, snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*QueryResponse, error) { +// if err := validate([]validation{ +// {targetValue: timeout, +// constraints: []constraint{{target: "timeout", name: null, rule: false, +// chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { +// return nil, err +// } +// req, err := client.queryPreparer(snapshot, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) +// if err != nil { +// return nil, err +// } +// resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.queryResponder}, req) +// if err != nil { +// return nil, err +// } +// return resp.(*QueryResponse), err +//} +// +//// queryPreparer prepares the Query request. 
+//func (client blobClient) queryPreparer(snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +// req, err := pipeline.NewRequest("POST", client.url, nil) +// if err != nil { +// return req, pipeline.NewError(err, "failed to create request") +// } +// params := req.URL.Query() +// if snapshot != nil && len(*snapshot) > 0 { +// params.Set("snapshot", *snapshot) +// } +// if timeout != nil { +// params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) +// } +// params.Set("comp", "query") +// req.URL.RawQuery = params.Encode() +// if leaseID != nil { +// req.Header.Set("x-ms-lease-id", *leaseID) +// } +// if encryptionKey != nil { +// req.Header.Set("x-ms-encryption-key", *encryptionKey) +// } +// if encryptionKeySha256 != nil { +// req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) +// } +// if encryptionAlgorithm != EncryptionAlgorithmNone { +// req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) +// } +// if ifModifiedSince != nil { +// req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifUnmodifiedSince != nil { +// req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifMatch != nil { +// req.Header.Set("If-Match", string(*ifMatch)) +// } +// if ifNoneMatch != nil { +// req.Header.Set("If-None-Match", string(*ifNoneMatch)) +// } +// req.Header.Set("x-ms-version", ServiceVersion) +// if requestID != nil { +// req.Header.Set("x-ms-client-request-id", *requestID) +// } +// b, err := xml.Marshal(queryRequest) +// if err != nil { +// return req, pipeline.NewError(err, "failed to marshal request body") +// } +// req.Header.Set("Content-Type", "application/xml") +// err = req.SetBody(bytes.NewReader(b)) +// if err != nil { +// return req, pipeline.NewError(err, "failed to set request body") +// } +// return req, nil +//} +// +//// queryResponder handles the response to the Query request. +//func (client blobClient) queryResponder(resp pipeline.Response) (pipeline.Response, error) { +// err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) +// if resp == nil { +// return nil, err +// } +// return &QueryResponse{rawResponse: resp.Response()}, err +//} + // ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete // operations // @@ -822,16 +1204,17 @@ func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipelin // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded -// in the analytics logs when storage analytics logging is enabled. -func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobReleaseLeaseResponse, error) { +// matching value. 
ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobReleaseLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -843,7 +1226,7 @@ func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeo } // releaseLeasePreparer prepares the ReleaseLease request. -func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -867,6 +1250,9 @@ func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, if if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -886,6 +1272,147 @@ func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err } +// TODO funky rename API +//// Rename rename a blob/file. By default, the destination is overwritten and if the destination already exists and has +//// a lease the lease is broken. This operation supports conditional HTTP requests. For more information, see +//// [Specifying Conditional Headers for Blob Service +//// Operations](https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations). +//// To fail if the destination already exists, use a conditional request with If-None-Match: "*". +//// +//// renameSource is the file or directory to be renamed. The value must have the following format: +//// "/{filesysystem}/{path}". If "x-ms-properties" is specified, the properties will overwrite the existing properties; +//// otherwise, the existing properties will be preserved. timeout is the timeout parameter is expressed in seconds. For +//// more information, see Setting +//// Timeouts for Blob Service Operations. directoryProperties is optional. 
User-defined properties to be stored +//// with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2, ...", +//// where each value is base64 encoded. posixPermissions is optional and only valid if Hierarchical Namespace is enabled +//// for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each class may +//// be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and +//// 4-digit octal notation (e.g. 0766) are supported. posixUmask is only valid if Hierarchical Namespace is enabled for +//// the account. This umask restricts permission settings for file and directory, and will only be applied when default +//// Acl does not exist in parent directory. If the umask bit has set, it means that the corresponding permission will be +//// disabled. Otherwise the corresponding permission will be determined by the permission. A 4-digit octal notation +//// (e.g. 0022) is supported here. If no umask was specified, a default umask - 0027 will be used. cacheControl is cache +//// control for given resource contentType is content type for given resource contentEncoding is content encoding for +//// given resource contentLanguage is content language for given resource contentDisposition is content disposition for +//// given resource leaseID is if specified, the operation only succeeds if the resource's lease is active and matches +//// this ID. sourceLeaseID is a lease ID for the source path. If specified, the source path must have an active lease +//// and the lease ID must match. ifModifiedSince is specify this header value to operate only on a blob if it has been +//// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if +//// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs +//// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +//// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the +//// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not +//// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a +//// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. +//// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +//// logs when storage analytics logging is enabled. 
+//func (client blobClient) Rename(ctx context.Context, renameSource string, timeout *int32, directoryProperties *string, posixPermissions *string, posixUmask *string, cacheControl *string, contentType *string, contentEncoding *string, contentLanguage *string, contentDisposition *string, leaseID *string, sourceLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlobRenameResponse, error) { +// if err := validate([]validation{ +// {targetValue: timeout, +// constraints: []constraint{{target: "timeout", name: null, rule: false, +// chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { +// return nil, err +// } +// req, err := client.renamePreparer(renameSource, timeout, directoryProperties, posixPermissions, posixUmask, cacheControl, contentType, contentEncoding, contentLanguage, contentDisposition, leaseID, sourceLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) +// if err != nil { +// return nil, err +// } +// resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renameResponder}, req) +// if err != nil { +// return nil, err +// } +// return resp.(*BlobRenameResponse), err +//} +// +//// renamePreparer prepares the Rename request. +//func (client blobClient) renamePreparer(renameSource string, timeout *int32, directoryProperties *string, posixPermissions *string, posixUmask *string, cacheControl *string, contentType *string, contentEncoding *string, contentLanguage *string, contentDisposition *string, leaseID *string, sourceLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +// req, err := pipeline.NewRequest("PUT", client.url, nil) +// if err != nil { +// return req, pipeline.NewError(err, "failed to create request") +// } +// params := req.URL.Query() +// if timeout != nil { +// params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) +// } +// if pathRenameMode != PathRenameModeNone { +// params.Set("mode", string(client.PathRenameMode)) +// } +// req.URL.RawQuery = params.Encode() +// req.Header.Set("x-ms-rename-source", renameSource) +// if directoryProperties != nil { +// req.Header.Set("x-ms-properties", *directoryProperties) +// } +// if posixPermissions != nil { +// req.Header.Set("x-ms-permissions", *posixPermissions) +// } +// if posixUmask != nil { +// req.Header.Set("x-ms-umask", *posixUmask) +// } +// if cacheControl != nil { +// req.Header.Set("x-ms-cache-control", *cacheControl) +// } +// if contentType != nil { +// req.Header.Set("x-ms-content-type", *contentType) +// } +// if contentEncoding != nil { +// req.Header.Set("x-ms-content-encoding", *contentEncoding) +// } +// if contentLanguage != nil { +// req.Header.Set("x-ms-content-language", *contentLanguage) +// } +// if contentDisposition != nil { +// req.Header.Set("x-ms-content-disposition", *contentDisposition) +// } +// if leaseID != nil { +// req.Header.Set("x-ms-lease-id", *leaseID) +// } +// if sourceLeaseID != nil { +// req.Header.Set("x-ms-source-lease-id", *sourceLeaseID) +// } +// if ifModifiedSince != nil { +// 
req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifUnmodifiedSince != nil { +// req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifMatch != nil { +// req.Header.Set("If-Match", string(*ifMatch)) +// } +// if ifNoneMatch != nil { +// req.Header.Set("If-None-Match", string(*ifNoneMatch)) +// } +// if sourceIfModifiedSince != nil { +// req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if sourceIfUnmodifiedSince != nil { +// req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if sourceIfMatch != nil { +// req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) +// } +// if sourceIfNoneMatch != nil { +// req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) +// } +// req.Header.Set("x-ms-version", ServiceVersion) +// if requestID != nil { +// req.Header.Set("x-ms-client-request-id", *requestID) +// } +// return req, nil +//} +// +//// renameResponder handles the response to the Rename request. +//func (client blobClient) renameResponder(resp pipeline.Response) (pipeline.Response, error) { +// err := validateResponse(resp, http.StatusOK, http.StatusCreated) +// if resp == nil { +// return nil, err +// } +// io.Copy(ioutil.Discard, resp.Response().Body) +// resp.Response().Body.Close() +// return &BlobRenameResponse{rawResponse: resp.Response()}, err +//} + // RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete // operations // @@ -896,16 +1423,17 @@ func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline // it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only // on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate // only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a -// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded -// in the analytics logs when storage analytics logging is enabled. -func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlobRenewLeaseResponse, error) { +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. 
+func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobRenewLeaseResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -917,7 +1445,7 @@ func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout } // renewLeasePreparer prepares the RenewLease request. -func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -941,6 +1469,9 @@ func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifMo if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -960,6 +1491,159 @@ func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.R return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err } +// SetAccessControl set the owner, group, permissions, or access control list for a blob. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. owner is optional. The owner of the blob or directory. group is optional. The +// owning group of the blob or directory. posixPermissions is optional and only valid if Hierarchical Namespace is +// enabled for the account. Sets POSIX access permissions for the file owner, the file owning group, and others. Each +// class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic +// (rwxrw-rw-) and 4-digit octal notation (e.g. 0766) are supported. posixACL is sets POSIX access control rights on +// files and directories. The value is a comma-separated list of access control entries. Each access control entry +// (ACE) consists of a scope, a type, a user or group identifier, and permissions in the format +// "[scope:][type]:[id]:[permissions]". ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifModifiedSince is +// specify this header value to operate only on a blob if it has been modified since the specified date/time. 
+// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) SetAccessControl(ctx context.Context, timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*BlobSetAccessControlResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setAccessControlPreparer(timeout, leaseID, owner, group, posixPermissions, posixACL, ifMatch, ifNoneMatch, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessControlResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetAccessControlResponse), err +} + +// setAccessControlPreparer prepares the SetAccessControl request. +func (client blobClient) setAccessControlPreparer(timeout *int32, leaseID *string, owner *string, group *string, posixPermissions *string, posixACL *string, ifMatch *ETag, ifNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PATCH", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("action", "setAccessControl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if owner != nil { + req.Header.Set("x-ms-owner", *owner) + } + if group != nil { + req.Header.Set("x-ms-group", *group) + } + if posixPermissions != nil { + req.Header.Set("x-ms-permissions", *posixPermissions) + } + if posixACL != nil { + req.Header.Set("x-ms-acl", *posixACL) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// setAccessControlResponder handles the response to the SetAccessControl request. +func (client blobClient) setAccessControlResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetAccessControlResponse{rawResponse: resp.Response()}, err +} + +// SetExpiry sets the time a blob will expire and be deleted. +// +// expiryOptions is required. 
Indicates mode of the expiry time timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. expiresOn is the +// time to set the blob to expiry +func (client blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (*BlobSetExpiryResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setExpiryPreparer(expiryOptions, timeout, requestID, expiresOn) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setExpiryResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetExpiryResponse), err +} + +// setExpiryPreparer prepares the SetExpiry request. +func (client blobClient) setExpiryPreparer(expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "expiry") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-expiry-option", string(expiryOptions)) + if expiresOn != nil { + req.Header.Set("x-ms-expiry-time", *expiresOn) + } + return req, nil +} + +// setExpiryResponder handles the response to the SetExpiry request. +func (client blobClient) setExpiryResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetExpiryResponse{rawResponse: resp.Response()}, err +} + // SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB -// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if -// specified, the operation only succeeds if the resource's lease is active and matches this ID. -func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (*BlobSetTierResponse, error) { +// Timeouts for Blob Service Operations. versionID is the version id parameter is an opaque DateTime value that, +// when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. +// transactionalContentMD5 is specify the transactional md5 for the body, to be validated by the service. +// transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated by the service. 
requestID +// is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. ifTags is specify a SQL where clause on blob tags to operate only on blobs +// with a matching value. tags is blob tags +func (client blobClient) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, tags *BlobTags) (*BlobSetTagsResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.setTierPreparer(tier, timeout, requestID, leaseID) + req, err := client.setTagsPreparer(timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, tags) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTagsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetTagsResponse), err +} + +// setTagsPreparer prepares the SetTags request. +func (client blobClient) setTagsPreparer(timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, tags *BlobTags) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + params.Set("comp", "tags") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + b, err := xml.Marshal(tags) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setTagsResponder handles the response to the SetTags request. +func (client blobClient) setTagsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusNoContent) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetTagsResponse{rawResponse: resp.Response()}, err +} + +// SetTier the Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier +// determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. 
This operation does not update the blob's ETag. +// +// tier is indicates the tier to be set on the blob. snapshot is the snapshot parameter is an opaque DateTime value +// that, when present, specifies the blob snapshot to retrieve. For more information on working with blob snapshots, +// see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rehydratePriority is optional: Indicates the priority with which to +// rehydrate an archived blob. requestID is provides a client-generated, opaque value with a 1 KB character limit that +// is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. +func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (*BlobSetTierResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setTierPreparer(tier, snapshot, versionID, timeout, rehydratePriority, requestID, leaseID) if err != nil { return nil, err } @@ -1174,18 +1972,27 @@ func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, timeo } // setTierPreparer prepares the SetTier request. -func (client blobClient) setTierPreparer(tier AccessTierType, timeout *int32, requestID *string, leaseID *string) (pipeline.Request, error) { +func (client blobClient) setTierPreparer(tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } params.Set("comp", "tier") req.URL.RawQuery = params.Encode() req.Header.Set("x-ms-access-tier", string(tier)) + if rehydratePriority != RehydratePriorityNone { + req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -1219,25 +2026,30 @@ func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Resp // file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with // the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version // 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing -// Containers, Blobs, and Metadata for more information. 
sourceIfModifiedSince is specify this header value to operate -// only on a blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header -// value to operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify -// an ETag value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate -// only on blobs without a matching value. ifModifiedSince is specify this header value to operate only on a blob if it -// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on -// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (*BlobStartCopyFromURLResponse, error) { +// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. +// rehydratePriority is optional: Indicates the priority with which to rehydrate an archived blob. +// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not +// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a +// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// sourceIfTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. blobTagsString is optional. Used to set blob tags in various blob operations. sealBlob is overrides the +// sealed state of the destination blob. Service version 2019-12-12 and newer. 
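// Editor's note: an illustrative call against the new StartCopyFromURL signature
// that follows. AccessTierCool and RehydratePriorityNone are constants from this
// package; srcURL and the seal flag are placeholders, and every conditional
// parameter is left nil for brevity.
func exampleStartCopyWithTierAndSeal(ctx context.Context, client blobClient, srcURL string) error {
	seal := true // new sealBlob override (service version 2019-12-12 and newer)
	_, err := client.StartCopyFromURL(ctx, srcURL, nil, nil,
		AccessTierCool, RehydratePriorityNone, // new tier / rehydrate-priority parameters
		nil, nil, nil, nil, nil, // source conditions, incl. new sourceIfTags
		nil, nil, nil, nil, nil, // destination conditions, incl. new ifTags
		nil, nil, // leaseID, requestID
		nil, &seal) // new blobTagsString, sealBlob
	return err
}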
+func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool) (*BlobStartCopyFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, leaseID, requestID) + req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, tier, rehydratePriority, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, blobTagsString, sealBlob) if err != nil { return nil, err } @@ -1249,7 +2061,7 @@ func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string } // startCopyFromURLPreparer prepares the StartCopyFromURL request. -func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, leaseID *string, requestID *string) (pipeline.Request, error) { +func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -1264,6 +2076,12 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in req.Header.Set("x-ms-meta-"+k, v) } } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if rehydratePriority != RehydratePriorityNone { + req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) + } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -1276,6 +2094,9 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in if sourceIfNoneMatch != nil { req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) } + if sourceIfTags != nil { + req.Header.Set("x-ms-source-if-tags", *sourceIfTags) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -1288,6 +2109,9 @@ func (client blobClient) 
startCopyFromURLPreparer(copySource string, timeout *in if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-copy-source", copySource) if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) @@ -1296,6 +2120,12 @@ func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *in if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if sealBlob != nil { + req.Header.Set("x-ms-seal-blob", strconv.FormatBool(*sealBlob)) + } return req, nil } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go index 955f7d1..0008273 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go @@ -43,27 +43,39 @@ func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { // blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If // specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An // MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were -// validated when each was uploaded. metadata is optional. Specifies a user-defined name-value pair associated with the -// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the -// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified -// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, -// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and -// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is -// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. -// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobCommitBlockListResponse, error) { +// validated when each was uploaded. 
transactionalContentMD5 is specify the transactional md5 for the body, to be +// validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated +// by the service. metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no +// name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination +// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, +// and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names +// must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for +// more information. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches +// this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. +// Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage +// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the +// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key +// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is +// provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to +// use to encrypt the data provided in the request. If not specified, encryption is performed with the default account +// encryption scope. For more information, see Encryption at Rest for Azure Storage Services. tier is optional. +// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob +// operations. 
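// Editor's note: an illustrative call against the new CommitBlockList signature
// that follows, showing where the added transactional checksums, customer-provided
// key, tier, ifTags, and blobTagsString parameters sit; ids is a placeholder list
// of base64 block IDs and all optional arguments are left at their zero values.
func exampleCommitBlockList(ctx context.Context, client blockBlobClient, ids []string) error {
	_, err := client.CommitBlockList(ctx, BlockLookupList{Latest: ids}, nil,
		nil, nil, nil, nil, nil, // blob cache control, content type/encoding/language, content MD5
		nil, nil, // new transactional MD5 / CRC64
		nil, nil, nil, // metadata, leaseID, content disposition
		nil, nil, EncryptionAlgorithmNone, nil, // new customer-provided key / encryption scope
		AccessTierHot, // new tier parameter
		nil, nil, nil, nil, nil, // access conditions, incl. new ifTags
		nil, nil) // requestID, new blobTagsString
	return err
}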
+func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*BlockBlobCommitBlockListResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString) if err != nil { return nil, err } @@ -75,7 +87,7 @@ func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockL } // commitBlockListPreparer prepares the CommitBlockList request. 
-func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -101,6 +113,12 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti if blobContentMD5 != nil { req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) } + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } if metadata != nil { for k, v := range metadata { req.Header.Set("x-ms-meta-"+k, v) @@ -112,6 +130,21 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti if blobContentDisposition != nil { req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -124,10 +157,16 @@ func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, ti if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } b, err := xml.Marshal(blocks) if err != nil { return req, pipeline.NewError(err, "failed to marshal request body") @@ -161,16 +200,17 @@ func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) ( // a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. 
leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character -// limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (*BlockList, error) { +// lease is active and matches this ID. ifTags is specify a SQL where clause on blob tags to operate only on blobs with +// a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (*BlockList, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, requestID) + req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, ifTags, requestID) if err != nil { return nil, err } @@ -182,7 +222,7 @@ func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockLi } // getBlockListPreparer prepares the GetBlockList request. -func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { +func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -200,6 +240,9 @@ func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snaps if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -238,13 +281,22 @@ func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pip // equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the // same size for each block. contentLength is the length of the request. body is initial data body will be closed upon // successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the -// transactional md5 for the body, to be validated by the service. timeout is the timeout parameter is expressed in +// transactional md5 for the body, to be validated by the service. transactionalContentCrc64 is specify the +// transactional crc64 for the body, to be validated by the service. timeout is the timeout parameter is expressed in // seconds. For more information, see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. 
requestID is provides a client-generated, opaque value with a 1 KB character -// limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (*BlockBlobStageBlockResponse, error) { +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (*BlockBlobStageBlockResponse, error) { if err := validate([]validation{ {targetValue: body, constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, @@ -253,7 +305,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, timeout, leaseID, requestID) + req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, requestID) if err != nil { return nil, err } @@ -265,7 +317,7 @@ func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, co } // stageBlockPreparer prepares the StageBlock request. 
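// Editor's note: a hedged sketch (not part of the vendored diff) of deriving the
// customer-provided key values that the preparers in this file now send as
// x-ms-encryption-key and x-ms-encryption-key-sha256: both the AES-256 key and its
// SHA-256 digest are base64-encoded, per the doc comments above. "crypto/sha256"
// would need importing alongside the file's existing "encoding/base64".
func exampleCustomerProvidedKey(key []byte) (encryptionKey, encryptionKeySha256 string) {
	sum := sha256.Sum256(key) // digest of the raw 32-byte key
	return base64.StdEncoding.EncodeToString(key), base64.StdEncoding.EncodeToString(sum[:])
}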
-func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { +func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, body) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -281,9 +333,24 @@ func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength i if transactionalContentMD5 != nil { req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -309,24 +376,33 @@ func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipel // equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the // same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source. // sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the +// range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the // range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For // more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if -// it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate -// only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to -// operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is -// recorded in the analytics logs when storage analytics logging is enabled. 
-func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) { +// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt +// the data provided in the request. If not specified, encryption is performed with the root account encryption key. +// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of +// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is +// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be +// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. +// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, +// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for +// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been +// modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate +// only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. 
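// Editor's note: an illustrative call against the new StageBlockFromURL signature
// below, showing where the added sourceContentcrc64 and encryption parameters sit;
// blockID and srcURL are placeholders.
func exampleStageBlockFromURL(ctx context.Context, client blockBlobClient, blockID, srcURL string) error {
	_, err := client.StageBlockFromURL(ctx, blockID, 0, srcURL, // 0: the body is empty, data is pulled from srcURL
		nil, nil, nil, // sourceRange, sourceContentMD5, new sourceContentcrc64
		nil, // timeout
		nil, nil, EncryptionAlgorithmNone, nil, // new customer-provided key / encryption scope
		nil, // leaseID
		nil, nil, nil, nil, // source access conditions
		nil) // requestID
	return err
}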
+func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*BlockBlobStageBlockFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, timeout, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) + req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) if err != nil { return nil, err } @@ -338,7 +414,7 @@ func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID str } // stageBlockFromURLPreparer prepares the StageBlockFromURL request. -func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, timeout *int32, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -358,6 +434,21 @@ func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentL if sourceContentMD5 != nil { req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } @@ -400,27 +491,37 @@ func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) // error.contentLength is the length of the request. 
timeout is the timeout parameter is expressed in seconds. For more // information, see Setting -// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, -// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the -// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. -// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the -// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this -// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. -// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and -// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the -// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the -// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified -// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, -// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and -// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is -// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. -// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a +// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property +// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content +// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage +// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with +// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, +// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets +// the blob's cache control. If specified, this property is stored with the blob and returned with a read request. +// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are +// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more +// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not +// copied from the source blob or file. 
Note that beginning with version 2009-09-19, metadata names must adhere to the +// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. +// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies +// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed +// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. +// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key +// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the +// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is +// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the default account encryption scope. For +// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set +// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since +// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a // client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*BlockBlobUploadResponse, error) { +// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations. 
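Two of the new string parameters have non-obvious formats. A hedged sketch, assuming the query-string-style encoding this SDK uses for the x-ms-tags header and the service's SQL-like predicate syntax for x-ms-if-tags (all values hypothetical):

import "net/url"

// tagParams builds example values for blobTagsString and ifTags.
func tagParams() (blobTagsString, ifTags string) {
	tags := url.Values{}
	tags.Set("project", "hugo")    // hypothetical tag
	blobTagsString = tags.Encode() // URL-encoded key=value pairs joined with "&"
	ifTags = `"project" = 'hugo'`  // SQL-like where clause over the blob's tags
	return
}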
+func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (*BlockBlobUploadResponse, error) { if err := validate([]validation{ {targetValue: body, constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, @@ -429,7 +530,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.uploadPreparer(body, contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString) if err != nil { return nil, err } @@ -441,7 +542,7 @@ func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, co } // uploadPreparer prepares the Upload request. 
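transactionalContentMD5 is the raw MD5 digest of the request body; uploadPreparer (next) base64-encodes it into the Content-MD5 header so the service can validate the bytes it received. A minimal sketch of producing the digest in the []byte form Upload expects:

import "crypto/md5"

// bodyMD5 hashes the exact bytes being uploaded.
func bodyMD5(body []byte) []byte {
	sum := md5.Sum(body)
	return sum[:]
}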
-func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, body) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -451,6 +552,9 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) } req.URL.RawQuery = params.Encode() + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) if blobContentType != nil { req.Header.Set("x-ms-blob-content-type", *blobContentType) @@ -478,6 +582,21 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i if blobContentDisposition != nil { req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -490,10 +609,16 @@ func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength i if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } req.Header.Set("x-ms-blob-type", "BlockBlob") return req, nil } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go index 1b3ea2e..d697e37 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go @@ -10,7 +10,7 @@ import ( const ( // ServiceVersion specifies the version of the operations 
used in this package. - ServiceVersion = "2018-11-09" + ServiceVersion = "2019-12-12" ) // managementClient is the base client for Azblob. diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go index 599e811..88ff7df 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go @@ -259,14 +259,18 @@ func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipe // Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be // accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (*ContainerCreateResponse, error) { +// defaultEncryptionScope is optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on +// the container and use for all future writes. preventEncryptionScopeOverride is optional. Version 2019-07-07 and +// newer. If true, prevents any request from specifying a different encryption scope than the scope set on the +// container. +func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (*ContainerCreateResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.createPreparer(timeout, metadata, access, requestID) + req, err := client.createPreparer(timeout, metadata, access, requestID, defaultEncryptionScope, preventEncryptionScopeOverride) if err != nil { return nil, err } @@ -278,7 +282,7 @@ func (client containerClient) Create(ctx context.Context, timeout *int32, metada } // createPreparer prepares the Create request. 
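For context, a sketch of the two new Create parameters in use, pinning all writes in a new container to a single encryption scope. The scope name is hypothetical, and since containerClient is unexported this assumes code inside the package:

import "context"

// createWithScope creates a container whose writes may only use one scope.
func createWithScope(ctx context.Context, client containerClient) error {
	scope := "my-encryption-scope" // hypothetical scope name
	deny := true                   // reject per-request scope overrides
	_, err := client.Create(ctx, nil, nil, PublicAccessNone, nil, &scope, &deny)
	return err
}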
-func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string) (pipeline.Request, error) { +func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") } @@ -301,6 +305,12 @@ func (client containerClient) createPreparer(timeout *int32, metadata map[string if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } + if defaultEncryptionScope != nil { + req.Header.Set("x-ms-default-encryption-scope", *defaultEncryptionScope) + } + if preventEncryptionScopeOverride != nil { + req.Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*preventEncryptionScopeOverride)) + } return req, nil } @@ -881,6 +891,70 @@ func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipel return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err } +// Restore restores a previously-deleted container. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// deletedContainerName is optional. Version 2019-12-12 and later. Specifies the name of the deleted container to +// restore. deletedContainerVersion is optional. Version 2019-12-12 and later. Specifies the version of the deleted +// container to restore. +func (client containerClient) Restore(ctx context.Context, timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (*ContainerRestoreResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.restorePreparer(timeout, requestID, deletedContainerName, deletedContainerVersion) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.restoreResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRestoreResponse), err +} + +// restorePreparer prepares the Restore request.
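For context, a sketch of Restore in use; the deleted container's name and version come from a container listing that includes deleted items (see ListContainersIncludeDeleted below), and the values here are hypothetical:

import "context"

// undeleteContainer restores a soft-deleted container by name and version.
func undeleteContainer(ctx context.Context, client containerClient) error {
	name := "orders"              // hypothetical deleted container name
	version := "01D60F8BB59A4652" // hypothetical version from the listing
	_, err := client.Restore(ctx, nil, nil, &name, &version)
	return err
}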
+func (client containerClient) restorePreparer(timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "undelete") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if deletedContainerName != nil { + req.Header.Set("x-ms-deleted-container-name", *deletedContainerName) + } + if deletedContainerVersion != nil { + req.Header.Set("x-ms-deleted-container-version", *deletedContainerVersion) + } + return req, nil +} + +// restoreResponder handles the response to the Restore request. +func (client containerClient) restoreResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRestoreResponse{rawResponse: resp.Response()}, err +} + // SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a // container may be accessed publicly. // diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go index 3915849..78f467c 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go @@ -4,8 +4,6 @@ package azblob // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( - "crypto/hmac" - "crypto/sha256" "encoding/base64" "encoding/xml" "errors" @@ -109,6 +107,8 @@ const ( AccessTierNone AccessTierType = "" // AccessTierP10 ... AccessTierP10 AccessTierType = "P10" + // AccessTierP15 ... + AccessTierP15 AccessTierType = "P15" // AccessTierP20 ... AccessTierP20 AccessTierType = "P20" // AccessTierP30 ... @@ -121,11 +121,17 @@ const ( AccessTierP50 AccessTierType = "P50" // AccessTierP6 ... AccessTierP6 AccessTierType = "P6" + // AccessTierP60 ... + AccessTierP60 AccessTierType = "P60" + // AccessTierP70 ... + AccessTierP70 AccessTierType = "P70" + // AccessTierP80 ... + AccessTierP80 AccessTierType = "P80" ) // PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type. func PossibleAccessTierTypeValues() []AccessTierType { - return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6} + return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP15, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6, AccessTierP60, AccessTierP70, AccessTierP80} } // AccountKindType enumerates the values for account kind type. @@ -134,6 +140,10 @@ type AccountKindType string const ( // AccountKindBlobStorage ... AccountKindBlobStorage AccountKindType = "BlobStorage" + // AccountKindBlockBlobStorage ... 
+ AccountKindBlockBlobStorage AccountKindType = "BlockBlobStorage" + // AccountKindFileStorage ... + AccountKindFileStorage AccountKindType = "FileStorage" // AccountKindNone represents an empty AccountKindType. AccountKindNone AccountKindType = "" // AccountKindStorage ... @@ -144,7 +154,7 @@ const ( // PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type. func PossibleAccountKindTypeValues() []AccountKindType { - return []AccountKindType{AccountKindBlobStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2} + return []AccountKindType{AccountKindBlobStorage, AccountKindBlockBlobStorage, AccountKindFileStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2} } // ArchiveStatusType enumerates the values for archive status type. @@ -164,6 +174,27 @@ func PossibleArchiveStatusTypeValues() []ArchiveStatusType { return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot} } +// BlobExpiryOptionsType enumerates the values for blob expiry options type. +type BlobExpiryOptionsType string + +const ( + // BlobExpiryOptionsAbsolute ... + BlobExpiryOptionsAbsolute BlobExpiryOptionsType = "Absolute" + // BlobExpiryOptionsNeverExpire ... + BlobExpiryOptionsNeverExpire BlobExpiryOptionsType = "NeverExpire" + // BlobExpiryOptionsNone represents an empty BlobExpiryOptionsType. + BlobExpiryOptionsNone BlobExpiryOptionsType = "" + // BlobExpiryOptionsRelativeToCreation ... + BlobExpiryOptionsRelativeToCreation BlobExpiryOptionsType = "RelativeToCreation" + // BlobExpiryOptionsRelativeToNow ... + BlobExpiryOptionsRelativeToNow BlobExpiryOptionsType = "RelativeToNow" +) + +// PossibleBlobExpiryOptionsTypeValues returns an array of possible values for the BlobExpiryOptionsType const type. +func PossibleBlobExpiryOptionsTypeValues() []BlobExpiryOptionsType { + return []BlobExpiryOptionsType{BlobExpiryOptionsAbsolute, BlobExpiryOptionsNeverExpire, BlobExpiryOptionsNone, BlobExpiryOptionsRelativeToCreation, BlobExpiryOptionsRelativeToNow} +} + // BlobType enumerates the values for blob type. type BlobType string @@ -240,6 +271,21 @@ func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly} } +// EncryptionAlgorithmType enumerates the values for encryption algorithm type. +type EncryptionAlgorithmType string + +const ( + // EncryptionAlgorithmAES256 ... + EncryptionAlgorithmAES256 EncryptionAlgorithmType = "AES256" + // EncryptionAlgorithmNone represents an empty EncryptionAlgorithmType. + EncryptionAlgorithmNone EncryptionAlgorithmType = "" +) + +// PossibleEncryptionAlgorithmTypeValues returns an array of possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{EncryptionAlgorithmAES256, EncryptionAlgorithmNone} +} + // GeoReplicationStatusType enumerates the values for geo replication status type. type GeoReplicationStatusType string @@ -330,19 +376,25 @@ const ( ListBlobsIncludeItemNone ListBlobsIncludeItemType = "" // ListBlobsIncludeItemSnapshots ... ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots" + // ListBlobsIncludeItemTags ... + ListBlobsIncludeItemTags ListBlobsIncludeItemType = "tags" // ListBlobsIncludeItemUncommittedblobs ... 
ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs" + // ListBlobsIncludeItemVersions ... + ListBlobsIncludeItemVersions ListBlobsIncludeItemType = "versions" ) // PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type. func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType { - return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemUncommittedblobs} + return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions} } // ListContainersIncludeType enumerates the values for list containers include type. type ListContainersIncludeType string const ( + // ListContainersIncludeDeleted ... + ListContainersIncludeDeleted ListContainersIncludeType = "deleted" // ListContainersIncludeMetadata ... ListContainersIncludeMetadata ListContainersIncludeType = "metadata" // ListContainersIncludeNone represents an empty ListContainersIncludeType. @@ -351,7 +403,59 @@ const ( // PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type. func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { - return []ListContainersIncludeType{ListContainersIncludeMetadata, ListContainersIncludeNone} + return []ListContainersIncludeType{ListContainersIncludeDeleted, ListContainersIncludeMetadata, ListContainersIncludeNone} +} + +// PathRenameModeType enumerates the values for path rename mode type. +type PathRenameModeType string + +const ( + // PathRenameModeLegacy ... + PathRenameModeLegacy PathRenameModeType = "legacy" + // PathRenameModeNone represents an empty PathRenameModeType. + PathRenameModeNone PathRenameModeType = "" + // PathRenameModePosix ... + PathRenameModePosix PathRenameModeType = "posix" +) + +// PossiblePathRenameModeTypeValues returns an array of possible values for the PathRenameModeType const type. +func PossiblePathRenameModeTypeValues() []PathRenameModeType { + return []PathRenameModeType{PathRenameModeLegacy, PathRenameModeNone, PathRenameModePosix} +} + +// PremiumPageBlobAccessTierType enumerates the values for premium page blob access tier type. +type PremiumPageBlobAccessTierType string + +const ( + // PremiumPageBlobAccessTierNone represents an empty PremiumPageBlobAccessTierType. + PremiumPageBlobAccessTierNone PremiumPageBlobAccessTierType = "" + // PremiumPageBlobAccessTierP10 ... + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTierType = "P10" + // PremiumPageBlobAccessTierP15 ... + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTierType = "P15" + // PremiumPageBlobAccessTierP20 ... + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTierType = "P20" + // PremiumPageBlobAccessTierP30 ... + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTierType = "P30" + // PremiumPageBlobAccessTierP4 ... + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTierType = "P4" + // PremiumPageBlobAccessTierP40 ... + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTierType = "P40" + // PremiumPageBlobAccessTierP50 ... + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTierType = "P50" + // PremiumPageBlobAccessTierP6 ... 
+ PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTierType = "P6" + // PremiumPageBlobAccessTierP60 ... + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTierType = "P60" + // PremiumPageBlobAccessTierP70 ... + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTierType = "P70" + // PremiumPageBlobAccessTierP80 ... + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTierType = "P80" +) + +// PossiblePremiumPageBlobAccessTierTypeValues returns an array of possible values for the PremiumPageBlobAccessTierType const type. +func PossiblePremiumPageBlobAccessTierTypeValues() []PremiumPageBlobAccessTierType { + return []PremiumPageBlobAccessTierType{PremiumPageBlobAccessTierNone, PremiumPageBlobAccessTierP10, PremiumPageBlobAccessTierP15, PremiumPageBlobAccessTierP20, PremiumPageBlobAccessTierP30, PremiumPageBlobAccessTierP4, PremiumPageBlobAccessTierP40, PremiumPageBlobAccessTierP50, PremiumPageBlobAccessTierP6, PremiumPageBlobAccessTierP60, PremiumPageBlobAccessTierP70, PremiumPageBlobAccessTierP80} } // PublicAccessType enumerates the values for public access type. @@ -371,6 +475,40 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} } +// QueryFormatType enumerates the values for query format type. +type QueryFormatType string + +const ( + // QueryFormatDelimited ... + QueryFormatDelimited QueryFormatType = "delimited" + // QueryFormatJSON ... + QueryFormatJSON QueryFormatType = "json" + // QueryFormatNone represents an empty QueryFormatType. + QueryFormatNone QueryFormatType = "" +) + +// PossibleQueryFormatTypeValues returns an array of possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{QueryFormatDelimited, QueryFormatJSON, QueryFormatNone} +} + +// RehydratePriorityType enumerates the values for rehydrate priority type. +type RehydratePriorityType string + +const ( + // RehydratePriorityHigh ... + RehydratePriorityHigh RehydratePriorityType = "High" + // RehydratePriorityNone represents an empty RehydratePriorityType. + RehydratePriorityNone RehydratePriorityType = "" + // RehydratePriorityStandard ... + RehydratePriorityStandard RehydratePriorityType = "Standard" +) + +// PossibleRehydratePriorityTypeValues returns an array of possible values for the RehydratePriorityType const type. +func PossibleRehydratePriorityTypeValues() []RehydratePriorityType { + return []RehydratePriorityType{RehydratePriorityHigh, RehydratePriorityNone, RehydratePriorityStandard} +} + // SequenceNumberActionType enumerates the values for sequence number action type. type SequenceNumberActionType string @@ -429,6 +567,16 @@ const ( StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" // StorageErrorCodeAuthorizationFailure ... StorageErrorCodeAuthorizationFailure StorageErrorCodeType = "AuthorizationFailure" + // StorageErrorCodeAuthorizationPermissionMismatch ... + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCodeType = "AuthorizationPermissionMismatch" + // StorageErrorCodeAuthorizationProtocolMismatch ... + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCodeType = "AuthorizationProtocolMismatch" + // StorageErrorCodeAuthorizationResourceTypeMismatch ... + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCodeType = "AuthorizationResourceTypeMismatch" + // StorageErrorCodeAuthorizationServiceMismatch ... 
+ StorageErrorCodeAuthorizationServiceMismatch StorageErrorCodeType = "AuthorizationServiceMismatch" + // StorageErrorCodeAuthorizationSourceIPMismatch ... + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCodeType = "AuthorizationSourceIPMismatch" // StorageErrorCodeBlobAlreadyExists ... StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists" // StorageErrorCodeBlobArchived ... @@ -571,6 +719,8 @@ const ( StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" // StorageErrorCodeMultipleConditionHeadersNotSupported ... StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" + // StorageErrorCodeNoAuthenticationInformation ... + StorageErrorCodeNoAuthenticationInformation StorageErrorCodeType = "NoAuthenticationInformation" // StorageErrorCodeNone represents an empty StorageErrorCodeType. StorageErrorCodeNone StorageErrorCodeType = "" // StorageErrorCodeNoPendingCopyOperation ... @@ -633,7 +783,7 @@ const ( // PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { - return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, 
StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnaphotOperationRateExceeded, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} + return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeAuthorizationPermissionMismatch, StorageErrorCodeAuthorizationProtocolMismatch, StorageErrorCodeAuthorizationResourceTypeMismatch, StorageErrorCodeAuthorizationServiceMismatch, StorageErrorCodeAuthorizationSourceIPMismatch, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, 
StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNoAuthenticationInformation, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnaphotOperationRateExceeded, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} } // SyncCopyStatusType enumerates the values for sync copy status type. 
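The Possible...Values helpers extended above lend themselves to simple membership checks; a minimal sketch that tests whether an x-ms-error-code header value is one this package enumerates:

// isKnownStorageErrorCode reports whether code matches a known constant.
func isKnownStorageErrorCode(code string) bool {
	for _, known := range PossibleStorageErrorCodeTypeValues() {
		if string(known) == code {
			return true
		}
	}
	return false
}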
@@ -654,11 +804,11 @@ func PossibleSyncCopyStatusTypeValues() []SyncCopyStatusType { // AccessPolicy - An Access policy type AccessPolicy struct { // Start - the date-time the policy is active - Start time.Time `xml:"Start"` + Start *time.Time `xml:"Start"` // Expiry - the date-time the policy expires - Expiry time.Time `xml:"Expiry"` + Expiry *time.Time `xml:"Expiry"` // Permission - the permissions for the acl policy - Permission string `xml:"Permission"` + Permission *string `xml:"Permission"` } // MarshalXML implements the xml.Marshaler interface for AccessPolicy. @@ -737,6 +887,16 @@ func (ababfur AppendBlobAppendBlockFromURLResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionKeySha256() string { + return ababfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionScope() string { + return ababfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (ababfur AppendBlobAppendBlockFromURLResponse) ErrorCode() string { return ababfur.rawResponse.Header.Get("x-ms-error-code") @@ -747,6 +907,11 @@ func (ababfur AppendBlobAppendBlockFromURLResponse) ETag() ETag { return ETag(ababfur.rawResponse.Header.Get("ETag")) } +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (ababfur AppendBlobAppendBlockFromURLResponse) IsServerEncrypted() string { + return ababfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + // LastModified returns the value for header Last-Modified. func (ababfur AppendBlobAppendBlockFromURLResponse) LastModified() time.Time { s := ababfur.rawResponse.Header.Get("Last-Modified") @@ -770,6 +935,19 @@ func (ababfur AppendBlobAppendBlockFromURLResponse) Version() string { return ababfur.rawResponse.Header.Get("x-ms-version") } +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (ababfur AppendBlobAppendBlockFromURLResponse) XMsContentCrc64() []byte { + s := ababfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // AppendBlobAppendBlockResponse ... type AppendBlobAppendBlockResponse struct { rawResponse *http.Response @@ -808,6 +986,11 @@ func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 { return int32(i) } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ababr AppendBlobAppendBlockResponse) ClientRequestID() string { + return ababr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte { s := ababr.rawResponse.Header.Get("Content-MD5") @@ -834,6 +1017,16 @@ func (ababr AppendBlobAppendBlockResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (ababr AppendBlobAppendBlockResponse) EncryptionKeySha256() string { + return ababr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. 
+func (ababr AppendBlobAppendBlockResponse) EncryptionScope() string { + return ababr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (ababr AppendBlobAppendBlockResponse) ErrorCode() string { return ababr.rawResponse.Header.Get("x-ms-error-code") @@ -872,6 +1065,19 @@ func (ababr AppendBlobAppendBlockResponse) Version() string { return ababr.rawResponse.Header.Get("x-ms-version") } +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (ababr AppendBlobAppendBlockResponse) XMsContentCrc64() []byte { + s := ababr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // AppendBlobCreateResponse ... type AppendBlobCreateResponse struct { rawResponse *http.Response @@ -892,6 +1098,11 @@ func (abcr AppendBlobCreateResponse) Status() string { return abcr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (abcr AppendBlobCreateResponse) ClientRequestID() string { + return abcr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (abcr AppendBlobCreateResponse) ContentMD5() []byte { s := abcr.rawResponse.Header.Get("Content-MD5") @@ -918,6 +1129,16 @@ func (abcr AppendBlobCreateResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (abcr AppendBlobCreateResponse) EncryptionKeySha256() string { + return abcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (abcr AppendBlobCreateResponse) EncryptionScope() string { + return abcr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (abcr AppendBlobCreateResponse) ErrorCode() string { return abcr.rawResponse.Header.Get("x-ms-error-code") @@ -956,6 +1177,87 @@ func (abcr AppendBlobCreateResponse) Version() string { return abcr.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (abcr AppendBlobCreateResponse) VersionID() string { + return abcr.rawResponse.Header.Get("x-ms-version-id") +} + +// AppendBlobSealResponse ... +type AppendBlobSealResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (absr AppendBlobSealResponse) Response() *http.Response { + return absr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (absr AppendBlobSealResponse) StatusCode() int { + return absr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (absr AppendBlobSealResponse) Status() string { + return absr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (absr AppendBlobSealResponse) ClientRequestID() string { + return absr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (absr AppendBlobSealResponse) Date() time.Time { + s := absr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (absr AppendBlobSealResponse) ErrorCode() string { + return absr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (absr AppendBlobSealResponse) ETag() ETag { + return ETag(absr.rawResponse.Header.Get("ETag")) +} + +// IsSealed returns the value for header x-ms-blob-sealed. +func (absr AppendBlobSealResponse) IsSealed() string { + return absr.rawResponse.Header.Get("x-ms-blob-sealed") +} + +// LastModified returns the value for header Last-Modified. +func (absr AppendBlobSealResponse) LastModified() time.Time { + s := absr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (absr AppendBlobSealResponse) RequestID() string { + return absr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (absr AppendBlobSealResponse) Version() string { + return absr.rawResponse.Header.Get("x-ms-version") +} + // BlobAbortCopyFromURLResponse ... type BlobAbortCopyFromURLResponse struct { rawResponse *http.Response @@ -976,6 +1278,11 @@ func (bacfur BlobAbortCopyFromURLResponse) Status() string { return bacfur.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bacfur BlobAbortCopyFromURLResponse) ClientRequestID() string { + return bacfur.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time { s := bacfur.rawResponse.Header.Get("Date") @@ -1024,6 +1331,11 @@ func (balr BlobAcquireLeaseResponse) Status() string { return balr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (balr BlobAcquireLeaseResponse) ClientRequestID() string { + return balr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (balr BlobAcquireLeaseResponse) Date() time.Time { s := balr.rawResponse.Header.Get("Date") @@ -1095,6 +1407,11 @@ func (bblr BlobBreakLeaseResponse) Status() string { return bblr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bblr BlobBreakLeaseResponse) ClientRequestID() string { + return bblr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bblr BlobBreakLeaseResponse) Date() time.Time { s := bblr.rawResponse.Header.Get("Date") @@ -1174,6 +1491,11 @@ func (bclr BlobChangeLeaseResponse) Status() string { return bclr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bclr BlobChangeLeaseResponse) ClientRequestID() string { + return bclr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bclr BlobChangeLeaseResponse) Date() time.Time { s := bclr.rawResponse.Header.Get("Date") @@ -1245,6 +1567,24 @@ func (bcfur BlobCopyFromURLResponse) Status() string { return bcfur.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bcfur BlobCopyFromURLResponse) ClientRequestID() string { + return bcfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. 
+func (bcfur BlobCopyFromURLResponse) ContentMD5() []byte { + s := bcfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // CopyID returns the value for header x-ms-copy-id. func (bcfur BlobCopyFromURLResponse) CopyID() string { return bcfur.rawResponse.Header.Get("x-ms-copy-id") @@ -1301,6 +1641,24 @@ func (bcfur BlobCopyFromURLResponse) Version() string { return bcfur.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (bcfur BlobCopyFromURLResponse) VersionID() string { + return bcfur.rawResponse.Header.Get("x-ms-version-id") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bcfur BlobCopyFromURLResponse) XMsContentCrc64() []byte { + s := bcfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // BlobCreateSnapshotResponse ... type BlobCreateSnapshotResponse struct { rawResponse *http.Response @@ -1321,6 +1679,11 @@ func (bcsr BlobCreateSnapshotResponse) Status() string { return bcsr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bcsr BlobCreateSnapshotResponse) ClientRequestID() string { + return bcsr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bcsr BlobCreateSnapshotResponse) Date() time.Time { s := bcsr.rawResponse.Header.Get("Date") @@ -1344,6 +1707,11 @@ func (bcsr BlobCreateSnapshotResponse) ETag() ETag { return ETag(bcsr.rawResponse.Header.Get("ETag")) } +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bcsr BlobCreateSnapshotResponse) IsServerEncrypted() string { + return bcsr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + // LastModified returns the value for header Last-Modified. func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time { s := bcsr.rawResponse.Header.Get("Last-Modified") @@ -1372,6 +1740,11 @@ func (bcsr BlobCreateSnapshotResponse) Version() string { return bcsr.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (bcsr BlobCreateSnapshotResponse) VersionID() string { + return bcsr.rawResponse.Header.Get("x-ms-version-id") +} + // BlobDeleteResponse ... type BlobDeleteResponse struct { rawResponse *http.Response @@ -1392,6 +1765,11 @@ func (bdr BlobDeleteResponse) Status() string { return bdr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bdr BlobDeleteResponse) ClientRequestID() string { + return bdr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bdr BlobDeleteResponse) Date() time.Time { s := bdr.rawResponse.Header.Get("Date") @@ -1423,8 +1801,94 @@ func (bdr BlobDeleteResponse) Version() string { // BlobFlatListSegment ... type BlobFlatListSegment struct { // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Blobs"` - BlobItems []BlobItem `xml:"Blob"` + XMLName xml.Name `xml:"Blobs"` + BlobItems []BlobItemInternal `xml:"Blob"` +} + +// BlobGetAccessControlResponse ... +type BlobGetAccessControlResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
+func (bgacr BlobGetAccessControlResponse) Response() *http.Response { + return bgacr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgacr BlobGetAccessControlResponse) StatusCode() int { + return bgacr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgacr BlobGetAccessControlResponse) Status() string { + return bgacr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bgacr BlobGetAccessControlResponse) ClientRequestID() string { + return bgacr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bgacr BlobGetAccessControlResponse) Date() time.Time { + s := bgacr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ETag returns the value for header ETag. +func (bgacr BlobGetAccessControlResponse) ETag() ETag { + return ETag(bgacr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bgacr BlobGetAccessControlResponse) LastModified() time.Time { + s := bgacr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bgacr BlobGetAccessControlResponse) RequestID() string { + return bgacr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bgacr BlobGetAccessControlResponse) Version() string { + return bgacr.rawResponse.Header.Get("x-ms-version") +} + +// XMsACL returns the value for header x-ms-acl. +func (bgacr BlobGetAccessControlResponse) XMsACL() string { + return bgacr.rawResponse.Header.Get("x-ms-acl") +} + +// XMsGroup returns the value for header x-ms-group. +func (bgacr BlobGetAccessControlResponse) XMsGroup() string { + return bgacr.rawResponse.Header.Get("x-ms-group") +} + +// XMsOwner returns the value for header x-ms-owner. +func (bgacr BlobGetAccessControlResponse) XMsOwner() string { + return bgacr.rawResponse.Header.Get("x-ms-owner") +} + +// XMsPermissions returns the value for header x-ms-permissions. +func (bgacr BlobGetAccessControlResponse) XMsPermissions() string { + return bgacr.rawResponse.Header.Get("x-ms-permissions") } // BlobGetAccountInfoResponse ... @@ -1452,6 +1916,11 @@ func (bgair BlobGetAccountInfoResponse) AccountKind() AccountKindType { return AccountKindType(bgair.rawResponse.Header.Get("x-ms-account-kind")) } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bgair BlobGetAccountInfoResponse) ClientRequestID() string { + return bgair.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bgair BlobGetAccountInfoResponse) Date() time.Time { s := bgair.rawResponse.Header.Get("Date") @@ -1587,6 +2056,11 @@ func (bgpr BlobGetPropertiesResponse) CacheControl() string { return bgpr.rawResponse.Header.Get("Cache-Control") } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bgpr BlobGetPropertiesResponse) ClientRequestID() string { + return bgpr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentDisposition returns the value for header Content-Disposition. 
func (bgpr BlobGetPropertiesResponse) ContentDisposition() string { return bgpr.rawResponse.Header.Get("Content-Disposition") @@ -1702,6 +2176,16 @@ func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string { return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot") } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bgpr BlobGetPropertiesResponse) EncryptionKeySha256() string { + return bgpr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bgpr BlobGetPropertiesResponse) EncryptionScope() string { + return bgpr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (bgpr BlobGetPropertiesResponse) ErrorCode() string { return bgpr.rawResponse.Header.Get("x-ms-error-code") @@ -1712,11 +2196,34 @@ func (bgpr BlobGetPropertiesResponse) ETag() ETag { return ETag(bgpr.rawResponse.Header.Get("ETag")) } +// ExpiresOn returns the value for header x-ms-expiry-time. +func (bgpr BlobGetPropertiesResponse) ExpiresOn() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-expiry-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// IsCurrentVersion returns the value for header x-ms-is-current-version. +func (bgpr BlobGetPropertiesResponse) IsCurrentVersion() string { + return bgpr.rawResponse.Header.Get("x-ms-is-current-version") +} + // IsIncrementalCopy returns the value for header x-ms-incremental-copy. func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string { return bgpr.rawResponse.Header.Get("x-ms-incremental-copy") } +// IsSealed returns the value for header x-ms-blob-sealed. +func (bgpr BlobGetPropertiesResponse) IsSealed() string { + return bgpr.rawResponse.Header.Get("x-ms-blob-sealed") +} + // IsServerEncrypted returns the value for header x-ms-server-encrypted. func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string { return bgpr.rawResponse.Header.Get("x-ms-server-encrypted") @@ -1750,33 +2257,81 @@ func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType { return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status")) } +// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id. +func (bgpr BlobGetPropertiesResponse) ObjectReplicationPolicyID() string { + return bgpr.rawResponse.Header.Get("x-ms-or-policy-id") +} + +// ObjectReplicationRules returns the value for header x-ms-or. +func (bgpr BlobGetPropertiesResponse) ObjectReplicationRules() string { + return bgpr.rawResponse.Header.Get("x-ms-or") +} + +// RehydratePriority returns the value for header x-ms-rehydrate-priority. +func (bgpr BlobGetPropertiesResponse) RehydratePriority() string { + return bgpr.rawResponse.Header.Get("x-ms-rehydrate-priority") +} + // RequestID returns the value for header x-ms-request-id. func (bgpr BlobGetPropertiesResponse) RequestID() string { return bgpr.rawResponse.Header.Get("x-ms-request-id") } +// TagCount returns the value for header x-ms-tag-count. +func (bgpr BlobGetPropertiesResponse) TagCount() int64 { + s := bgpr.rawResponse.Header.Get("x-ms-tag-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + // Version returns the value for header x-ms-version. 
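+// x-ms-version reports the Storage REST API version that the service used to
+// process the request.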
 func (bgpr BlobGetPropertiesResponse) Version() string {
 	return bgpr.rawResponse.Header.Get("x-ms-version")
 }
 
+// VersionID returns the value for header x-ms-version-id.
+func (bgpr BlobGetPropertiesResponse) VersionID() string {
+	return bgpr.rawResponse.Header.Get("x-ms-version-id")
+}
+
 // BlobHierarchyListSegment ...
 type BlobHierarchyListSegment struct {
 	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName      xml.Name     `xml:"Blobs"`
-	BlobPrefixes []BlobPrefix `xml:"BlobPrefix"`
-	BlobItems    []BlobItem   `xml:"Blob"`
+	XMLName      xml.Name           `xml:"Blobs"`
+	BlobPrefixes []BlobPrefix       `xml:"BlobPrefix"`
+	BlobItems    []BlobItemInternal `xml:"Blob"`
 }
 
-// BlobItem - An Azure Storage blob
-type BlobItem struct {
+// BlobItemInternal - An Azure Storage blob
+type BlobItemInternal struct {
 	// XMLName is used for marshalling and is subject to removal in a future release.
-	XMLName    xml.Name       `xml:"Blob"`
-	Name       string         `xml:"Name"`
-	Deleted    bool           `xml:"Deleted"`
-	Snapshot   string         `xml:"Snapshot"`
-	Properties BlobProperties `xml:"Properties"`
-	Metadata   Metadata       `xml:"Metadata"`
+	XMLName          xml.Name       `xml:"Blob"`
+	Name             string         `xml:"Name"`
+	Deleted          bool           `xml:"Deleted"`
+	Snapshot         string         `xml:"Snapshot"`
+	VersionID        *string        `xml:"VersionId"`
+	IsCurrentVersion *bool          `xml:"IsCurrentVersion"`
+	Properties       BlobProperties `xml:"Properties"`
+
+	// TODO funky generator type -> *BlobMetadata
+	Metadata                  Metadata          `xml:"Metadata"`
+	BlobTags                  *BlobTags         `xml:"Tags"`
+	ObjectReplicationMetadata map[string]string `xml:"ObjectReplicationMetadata"`
+}
+
+// BlobMetadata ...
+type BlobMetadata struct {
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName xml.Name `xml:"Metadata"`
+	// AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+	AdditionalProperties map[string]string `xml:"AdditionalProperties"`
+	Encrypted            *string           `xml:"Encrypted,attr"`
 }
 
 // BlobPrefix ...
@@ -1820,24 +2375,32 @@ type BlobProperties struct {
 	DestinationSnapshot    *string    `xml:"DestinationSnapshot"`
 	DeletedTime            *time.Time `xml:"DeletedTime"`
 	RemainingRetentionDays *int32     `xml:"RemainingRetentionDays"`
-	// AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone'
+	// AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP15', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierP60', 'AccessTierP70', 'AccessTierP80', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone'
 	AccessTier         AccessTierType `xml:"AccessTier"`
 	AccessTierInferred *bool          `xml:"AccessTierInferred"`
 	// ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone'
-	ArchiveStatus        ArchiveStatusType `xml:"ArchiveStatus"`
-	AccessTierChangeTime *time.Time        `xml:"AccessTierChangeTime"`
+	ArchiveStatus             ArchiveStatusType `xml:"ArchiveStatus"`
+	CustomerProvidedKeySha256 *string           `xml:"CustomerProvidedKeySha256"`
+	// EncryptionScope - The name of the encryption scope under which the blob is encrypted.
+ EncryptionScope *string `xml:"EncryptionScope"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + TagCount *int32 `xml:"TagCount"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + IsSealed *bool `xml:"IsSealed"` + // RehydratePriority - Possible values include: 'RehydratePriorityHigh', 'RehydratePriorityStandard', 'RehydratePriorityNone' + RehydratePriority RehydratePriorityType `xml:"RehydratePriority"` } // MarshalXML implements the xml.Marshaler interface for BlobProperties. -func (bp BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - bp2 := (*blobProperties)(unsafe.Pointer(&bp)) - return e.EncodeElement(*bp2, start) +func (bpi BlobProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + bpi2 := (*blobProperties)(unsafe.Pointer(&bpi)) + return e.EncodeElement(*bpi2, start) } // UnmarshalXML implements the xml.Unmarshaler interface for BlobProperties. -func (bp *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - bp2 := (*blobProperties)(unsafe.Pointer(bp)) - return d.DecodeElement(bp2, &start) +func (bpi *BlobProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + bpi2 := (*blobProperties)(unsafe.Pointer(bpi)) + return d.DecodeElement(bpi2, &start) } // BlobReleaseLeaseResponse ... @@ -1860,6 +2423,11 @@ func (brlr BlobReleaseLeaseResponse) Status() string { return brlr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (brlr BlobReleaseLeaseResponse) ClientRequestID() string { + return brlr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (brlr BlobReleaseLeaseResponse) Date() time.Time { s := brlr.rawResponse.Header.Get("Date") @@ -1906,6 +2474,85 @@ func (brlr BlobReleaseLeaseResponse) Version() string { return brlr.rawResponse.Header.Get("x-ms-version") } +// BlobRenameResponse ... +type BlobRenameResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (brr BlobRenameResponse) Response() *http.Response { + return brr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (brr BlobRenameResponse) StatusCode() int { + return brr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (brr BlobRenameResponse) Status() string { + return brr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (brr BlobRenameResponse) ClientRequestID() string { + return brr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentLength returns the value for header Content-Length. +func (brr BlobRenameResponse) ContentLength() int64 { + s := brr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Date returns the value for header Date. +func (brr BlobRenameResponse) Date() time.Time { + s := brr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ETag returns the value for header ETag. +func (brr BlobRenameResponse) ETag() ETag { + return ETag(brr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. 
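+// The header is parsed as RFC 1123; a zero time.Time is returned when the
+// header is missing or cannot be parsed.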
+func (brr BlobRenameResponse) LastModified() time.Time { + s := brr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (brr BlobRenameResponse) RequestID() string { + return brr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (brr BlobRenameResponse) Version() string { + return brr.rawResponse.Header.Get("x-ms-version") +} + // BlobRenewLeaseResponse ... type BlobRenewLeaseResponse struct { rawResponse *http.Response @@ -1926,6 +2573,11 @@ func (brlr BlobRenewLeaseResponse) Status() string { return brlr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (brlr BlobRenewLeaseResponse) ClientRequestID() string { + return brlr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (brlr BlobRenewLeaseResponse) Date() time.Time { s := brlr.rawResponse.Header.Get("Date") @@ -1977,6 +2629,143 @@ func (brlr BlobRenewLeaseResponse) Version() string { return brlr.rawResponse.Header.Get("x-ms-version") } +// BlobSetAccessControlResponse ... +type BlobSetAccessControlResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bsacr BlobSetAccessControlResponse) Response() *http.Response { + return bsacr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bsacr BlobSetAccessControlResponse) StatusCode() int { + return bsacr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bsacr BlobSetAccessControlResponse) Status() string { + return bsacr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bsacr BlobSetAccessControlResponse) ClientRequestID() string { + return bsacr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bsacr BlobSetAccessControlResponse) Date() time.Time { + s := bsacr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ETag returns the value for header ETag. +func (bsacr BlobSetAccessControlResponse) ETag() ETag { + return ETag(bsacr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bsacr BlobSetAccessControlResponse) LastModified() time.Time { + s := bsacr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bsacr BlobSetAccessControlResponse) RequestID() string { + return bsacr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bsacr BlobSetAccessControlResponse) Version() string { + return bsacr.rawResponse.Header.Get("x-ms-version") +} + +// BlobSetExpiryResponse ... +type BlobSetExpiryResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bser BlobSetExpiryResponse) Response() *http.Response { + return bser.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. 
+func (bser BlobSetExpiryResponse) StatusCode() int { + return bser.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bser BlobSetExpiryResponse) Status() string { + return bser.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bser BlobSetExpiryResponse) ClientRequestID() string { + return bser.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bser BlobSetExpiryResponse) Date() time.Time { + s := bser.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bser BlobSetExpiryResponse) ErrorCode() string { + return bser.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bser BlobSetExpiryResponse) ETag() ETag { + return ETag(bser.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bser BlobSetExpiryResponse) LastModified() time.Time { + s := bser.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bser BlobSetExpiryResponse) RequestID() string { + return bser.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bser BlobSetExpiryResponse) Version() string { + return bser.rawResponse.Header.Get("x-ms-version") +} + // BlobSetHTTPHeadersResponse ... type BlobSetHTTPHeadersResponse struct { rawResponse *http.Response @@ -2010,6 +2799,11 @@ func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 { return i } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bshhr BlobSetHTTPHeadersResponse) ClientRequestID() string { + return bshhr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time { s := bshhr.rawResponse.Header.Get("Date") @@ -2076,6 +2870,11 @@ func (bsmr BlobSetMetadataResponse) Status() string { return bsmr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bsmr BlobSetMetadataResponse) ClientRequestID() string { + return bsmr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bsmr BlobSetMetadataResponse) Date() time.Time { s := bsmr.rawResponse.Header.Get("Date") @@ -2089,6 +2888,16 @@ func (bsmr BlobSetMetadataResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bsmr BlobSetMetadataResponse) EncryptionKeySha256() string { + return bsmr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bsmr BlobSetMetadataResponse) EncryptionScope() string { + return bsmr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. 
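+// The service sets x-ms-error-code only on failed requests, so an empty string
+// normally just means the call succeeded.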
func (bsmr BlobSetMetadataResponse) ErrorCode() string { return bsmr.rawResponse.Header.Get("x-ms-error-code") @@ -2127,6 +2936,64 @@ func (bsmr BlobSetMetadataResponse) Version() string { return bsmr.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (bsmr BlobSetMetadataResponse) VersionID() string { + return bsmr.rawResponse.Header.Get("x-ms-version-id") +} + +// BlobSetTagsResponse ... +type BlobSetTagsResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bstr BlobSetTagsResponse) Response() *http.Response { + return bstr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bstr BlobSetTagsResponse) StatusCode() int { + return bstr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bstr BlobSetTagsResponse) Status() string { + return bstr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bstr BlobSetTagsResponse) ClientRequestID() string { + return bstr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bstr BlobSetTagsResponse) Date() time.Time { + s := bstr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bstr BlobSetTagsResponse) ErrorCode() string { + return bstr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bstr BlobSetTagsResponse) RequestID() string { + return bstr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bstr BlobSetTagsResponse) Version() string { + return bstr.rawResponse.Header.Get("x-ms-version") +} + // BlobSetTierResponse ... type BlobSetTierResponse struct { rawResponse *http.Response @@ -2147,6 +3014,11 @@ func (bstr BlobSetTierResponse) Status() string { return bstr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bstr BlobSetTierResponse) ClientRequestID() string { + return bstr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ErrorCode returns the value for header x-ms-error-code. func (bstr BlobSetTierResponse) ErrorCode() string { return bstr.rawResponse.Header.Get("x-ms-error-code") @@ -2182,6 +3054,11 @@ func (bscfur BlobStartCopyFromURLResponse) Status() string { return bscfur.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bscfur BlobStartCopyFromURLResponse) ClientRequestID() string { + return bscfur.rawResponse.Header.Get("x-ms-client-request-id") +} + // CopyID returns the value for header x-ms-copy-id. func (bscfur BlobStartCopyFromURLResponse) CopyID() string { return bscfur.rawResponse.Header.Get("x-ms-copy-id") @@ -2238,6 +3115,75 @@ func (bscfur BlobStartCopyFromURLResponse) Version() string { return bscfur.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (bscfur BlobStartCopyFromURLResponse) VersionID() string { + return bscfur.rawResponse.Header.Get("x-ms-version-id") +} + +// BlobTag ... +type BlobTag struct { + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"Tag"` + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Tags"` + BlobTagSet []BlobTag `xml:"TagSet>Tag"` +} + +// Response returns the raw HTTP response object. +func (bt BlobTags) Response() *http.Response { + return bt.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bt BlobTags) StatusCode() int { + return bt.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bt BlobTags) Status() string { + return bt.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bt BlobTags) ClientRequestID() string { + return bt.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bt BlobTags) Date() time.Time { + s := bt.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bt BlobTags) ErrorCode() string { + return bt.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bt BlobTags) RequestID() string { + return bt.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bt BlobTags) Version() string { + return bt.rawResponse.Header.Get("x-ms-version") +} + // BlobUndeleteResponse ... type BlobUndeleteResponse struct { rawResponse *http.Response @@ -2258,6 +3204,11 @@ func (bur BlobUndeleteResponse) Status() string { return bur.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bur BlobUndeleteResponse) ClientRequestID() string { + return bur.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (bur BlobUndeleteResponse) Date() time.Time { s := bur.rawResponse.Header.Get("Date") @@ -2291,7 +3242,7 @@ type Block struct { // Name - The base64 encoded block ID. Name string `xml:"Name"` // Size - The block size in bytes. - Size int32 `xml:"Size"` + Size int64 `xml:"Size"` } // BlockBlobCommitBlockListResponse ... @@ -2314,6 +3265,11 @@ func (bbcblr BlockBlobCommitBlockListResponse) Status() string { return bbcblr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bbcblr BlockBlobCommitBlockListResponse) ClientRequestID() string { + return bbcblr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte { s := bbcblr.rawResponse.Header.Get("Content-MD5") @@ -2340,6 +3296,16 @@ func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbcblr BlockBlobCommitBlockListResponse) EncryptionKeySha256() string { + return bbcblr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. 
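+// The header names the encryption scope that protected the committed blob when
+// one was specified on the request; otherwise it is empty.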
+func (bbcblr BlockBlobCommitBlockListResponse) EncryptionScope() string { + return bbcblr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string { return bbcblr.rawResponse.Header.Get("x-ms-error-code") @@ -2378,6 +3344,24 @@ func (bbcblr BlockBlobCommitBlockListResponse) Version() string { return bbcblr.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (bbcblr BlockBlobCommitBlockListResponse) VersionID() string { + return bbcblr.rawResponse.Header.Get("x-ms-version-id") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bbcblr BlockBlobCommitBlockListResponse) XMsContentCrc64() []byte { + s := bbcblr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // BlockBlobStageBlockFromURLResponse ... type BlockBlobStageBlockFromURLResponse struct { rawResponse *http.Response @@ -2398,6 +3382,11 @@ func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string { return bbsbfur.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bbsbfur BlockBlobStageBlockFromURLResponse) ClientRequestID() string { + return bbsbfur.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte { s := bbsbfur.rawResponse.Header.Get("Content-MD5") @@ -2424,6 +3413,16 @@ func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionKeySha256() string { + return bbsbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionScope() string { + return bbsbfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string { return bbsbfur.rawResponse.Header.Get("x-ms-error-code") @@ -2444,6 +3443,19 @@ func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string { return bbsbfur.rawResponse.Header.Get("x-ms-version") } +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bbsbfur BlockBlobStageBlockFromURLResponse) XMsContentCrc64() []byte { + s := bbsbfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // BlockBlobStageBlockResponse ... type BlockBlobStageBlockResponse struct { rawResponse *http.Response @@ -2464,6 +3476,11 @@ func (bbsbr BlockBlobStageBlockResponse) Status() string { return bbsbr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bbsbr BlockBlobStageBlockResponse) ClientRequestID() string { + return bbsbr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. 
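+// The header travels base64-encoded; the accessor decodes it and returns nil
+// when the header is absent or cannot be decoded.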
func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte { s := bbsbr.rawResponse.Header.Get("Content-MD5") @@ -2490,6 +3507,16 @@ func (bbsbr BlockBlobStageBlockResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbsbr BlockBlobStageBlockResponse) EncryptionKeySha256() string { + return bbsbr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bbsbr BlockBlobStageBlockResponse) EncryptionScope() string { + return bbsbr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string { return bbsbr.rawResponse.Header.Get("x-ms-error-code") @@ -2510,6 +3537,19 @@ func (bbsbr BlockBlobStageBlockResponse) Version() string { return bbsbr.rawResponse.Header.Get("x-ms-version") } +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bbsbr BlockBlobStageBlockResponse) XMsContentCrc64() []byte { + s := bbsbr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // BlockBlobUploadResponse ... type BlockBlobUploadResponse struct { rawResponse *http.Response @@ -2530,6 +3570,11 @@ func (bbur BlockBlobUploadResponse) Status() string { return bbur.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bbur BlockBlobUploadResponse) ClientRequestID() string { + return bbur.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (bbur BlockBlobUploadResponse) ContentMD5() []byte { s := bbur.rawResponse.Header.Get("Content-MD5") @@ -2556,6 +3601,16 @@ func (bbur BlockBlobUploadResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (bbur BlockBlobUploadResponse) EncryptionKeySha256() string { + return bbur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (bbur BlockBlobUploadResponse) EncryptionScope() string { + return bbur.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (bbur BlockBlobUploadResponse) ErrorCode() string { return bbur.rawResponse.Header.Get("x-ms-error-code") @@ -2594,6 +3649,11 @@ func (bbur BlockBlobUploadResponse) Version() string { return bbur.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (bbur BlockBlobUploadResponse) VersionID() string { + return bbur.rawResponse.Header.Get("x-ms-version-id") +} + // BlockList ... type BlockList struct { rawResponse *http.Response @@ -2629,6 +3689,11 @@ func (bl BlockList) BlobContentLength() int64 { return i } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bl BlockList) ClientRequestID() string { + return bl.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentType returns the value for header Content-Type. func (bl BlockList) ContentType() string { return bl.rawResponse.Header.Get("Content-Type") @@ -2715,6 +3780,11 @@ func (calr ContainerAcquireLeaseResponse) Status() string { return calr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. 
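+// When the request carried x-ms-client-request-id (and asked for it to be
+// returned), the service echoes it here, which lets client and server logs be
+// correlated.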
+func (calr ContainerAcquireLeaseResponse) ClientRequestID() string { + return calr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (calr ContainerAcquireLeaseResponse) Date() time.Time { s := calr.rawResponse.Header.Get("Date") @@ -2786,6 +3856,11 @@ func (cblr ContainerBreakLeaseResponse) Status() string { return cblr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cblr ContainerBreakLeaseResponse) ClientRequestID() string { + return cblr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (cblr ContainerBreakLeaseResponse) Date() time.Time { s := cblr.rawResponse.Header.Get("Date") @@ -2865,6 +3940,11 @@ func (cclr ContainerChangeLeaseResponse) Status() string { return cclr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cclr ContainerChangeLeaseResponse) ClientRequestID() string { + return cclr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (cclr ContainerChangeLeaseResponse) Date() time.Time { s := cclr.rawResponse.Header.Get("Date") @@ -2936,6 +4016,11 @@ func (ccr ContainerCreateResponse) Status() string { return ccr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ccr ContainerCreateResponse) ClientRequestID() string { + return ccr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (ccr ContainerCreateResponse) Date() time.Time { s := ccr.rawResponse.Header.Get("Date") @@ -3002,6 +4087,11 @@ func (cdr ContainerDeleteResponse) Status() string { return cdr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cdr ContainerDeleteResponse) ClientRequestID() string { + return cdr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (cdr ContainerDeleteResponse) Date() time.Time { s := cdr.rawResponse.Header.Get("Date") @@ -3055,6 +4145,11 @@ func (cgair ContainerGetAccountInfoResponse) AccountKind() AccountKindType { return AccountKindType(cgair.rawResponse.Header.Get("x-ms-account-kind")) } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cgair ContainerGetAccountInfoResponse) ClientRequestID() string { + return cgair.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (cgair ContainerGetAccountInfoResponse) Date() time.Time { s := cgair.rawResponse.Header.Get("Date") @@ -3126,6 +4221,11 @@ func (cgpr ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType { return PublicAccessType(cgpr.rawResponse.Header.Get("x-ms-blob-public-access")) } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cgpr ContainerGetPropertiesResponse) ClientRequestID() string { + return cgpr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (cgpr ContainerGetPropertiesResponse) Date() time.Time { s := cgpr.rawResponse.Header.Get("Date") @@ -3139,6 +4239,16 @@ func (cgpr ContainerGetPropertiesResponse) Date() time.Time { return t } +// DefaultEncryptionScope returns the value for header x-ms-default-encryption-scope. 
+func (cgpr ContainerGetPropertiesResponse) DefaultEncryptionScope() string { + return cgpr.rawResponse.Header.Get("x-ms-default-encryption-scope") +} + +// DenyEncryptionScopeOverride returns the value for header x-ms-deny-encryption-scope-override. +func (cgpr ContainerGetPropertiesResponse) DenyEncryptionScopeOverride() string { + return cgpr.rawResponse.Header.Get("x-ms-deny-encryption-scope-override") +} + // ErrorCode returns the value for header x-ms-error-code. func (cgpr ContainerGetPropertiesResponse) ErrorCode() string { return cgpr.rawResponse.Header.Get("x-ms-error-code") @@ -3202,6 +4312,8 @@ type ContainerItem struct { // XMLName is used for marshalling and is subject to removal in a future release. XMLName xml.Name `xml:"Container"` Name string `xml:"Name"` + Deleted *bool `xml:"Deleted"` + Version *string `xml:"Version"` Properties ContainerProperties `xml:"Properties"` Metadata Metadata `xml:"Metadata"` } @@ -3217,9 +4329,13 @@ type ContainerProperties struct { // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' LeaseDuration LeaseDurationType `xml:"LeaseDuration"` // PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' - PublicAccess PublicAccessType `xml:"PublicAccess"` - HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` - HasLegalHold *bool `xml:"HasLegalHold"` + PublicAccess PublicAccessType `xml:"PublicAccess"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + DeletedTime *time.Time `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` } // MarshalXML implements the xml.Marshaler interface for ContainerProperties. @@ -3254,6 +4370,11 @@ func (crlr ContainerReleaseLeaseResponse) Status() string { return crlr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crlr ContainerReleaseLeaseResponse) ClientRequestID() string { + return crlr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (crlr ContainerReleaseLeaseResponse) Date() time.Time { s := crlr.rawResponse.Header.Get("Date") @@ -3320,6 +4441,11 @@ func (crlr ContainerRenewLeaseResponse) Status() string { return crlr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crlr ContainerRenewLeaseResponse) ClientRequestID() string { + return crlr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (crlr ContainerRenewLeaseResponse) Date() time.Time { s := crlr.rawResponse.Header.Get("Date") @@ -3371,6 +4497,59 @@ func (crlr ContainerRenewLeaseResponse) Version() string { return crlr.rawResponse.Header.Get("x-ms-version") } +// ContainerRestoreResponse ... +type ContainerRestoreResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crr ContainerRestoreResponse) Response() *http.Response { + return crr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crr ContainerRestoreResponse) StatusCode() int { + return crr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
+func (crr ContainerRestoreResponse) Status() string { + return crr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crr ContainerRestoreResponse) ClientRequestID() string { + return crr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crr ContainerRestoreResponse) Date() time.Time { + s := crr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crr ContainerRestoreResponse) ErrorCode() string { + return crr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (crr ContainerRestoreResponse) RequestID() string { + return crr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crr ContainerRestoreResponse) Version() string { + return crr.rawResponse.Header.Get("x-ms-version") +} + // ContainerSetAccessPolicyResponse ... type ContainerSetAccessPolicyResponse struct { rawResponse *http.Response @@ -3391,6 +4570,11 @@ func (csapr ContainerSetAccessPolicyResponse) Status() string { return csapr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (csapr ContainerSetAccessPolicyResponse) ClientRequestID() string { + return csapr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (csapr ContainerSetAccessPolicyResponse) Date() time.Time { s := csapr.rawResponse.Header.Get("Date") @@ -3457,6 +4641,11 @@ func (csmr ContainerSetMetadataResponse) Status() string { return csmr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (csmr ContainerSetMetadataResponse) ClientRequestID() string { + return csmr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (csmr ContainerSetMetadataResponse) Date() time.Time { s := csmr.rawResponse.Header.Get("Date") @@ -3520,6 +4709,404 @@ type CorsRule struct { MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"` } +// DataLakeStorageError ... +type DataLakeStorageError struct { + // DataLakeStorageErrorDetails - The service error response object. + DataLakeStorageErrorDetails *DataLakeStorageErrorError `xml:"error"` +} + +// DataLakeStorageErrorError - The service error response object. +type DataLakeStorageErrorError struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"DataLakeStorageError_error"` + // Code - The service error code. + Code *string `xml:"Code"` + // Message - The service error message. + Message *string `xml:"Message"` +} + +// DelimitedTextConfiguration - delimited text configuration +type DelimitedTextConfiguration struct { + // ColumnSeparator - column separator + ColumnSeparator string `xml:"ColumnSeparator"` + // FieldQuote - field quote + FieldQuote string `xml:"FieldQuote"` + // RecordSeparator - record separator + RecordSeparator string `xml:"RecordSeparator"` + // EscapeChar - escape char + EscapeChar string `xml:"EscapeChar"` + // HeadersPresent - has headers + HeadersPresent bool `xml:"HasHeaders"` +} + +// DirectoryCreateResponse ... +type DirectoryCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
+func (dcr DirectoryCreateResponse) Response() *http.Response { + return dcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (dcr DirectoryCreateResponse) StatusCode() int { + return dcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (dcr DirectoryCreateResponse) Status() string { + return dcr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (dcr DirectoryCreateResponse) ClientRequestID() string { + return dcr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentLength returns the value for header Content-Length. +func (dcr DirectoryCreateResponse) ContentLength() int64 { + s := dcr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Date returns the value for header Date. +func (dcr DirectoryCreateResponse) Date() time.Time { + s := dcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ETag returns the value for header ETag. +func (dcr DirectoryCreateResponse) ETag() ETag { + return ETag(dcr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (dcr DirectoryCreateResponse) LastModified() time.Time { + s := dcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (dcr DirectoryCreateResponse) RequestID() string { + return dcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (dcr DirectoryCreateResponse) Version() string { + return dcr.rawResponse.Header.Get("x-ms-version") +} + +// DirectoryDeleteResponse ... +type DirectoryDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ddr DirectoryDeleteResponse) Response() *http.Response { + return ddr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ddr DirectoryDeleteResponse) StatusCode() int { + return ddr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ddr DirectoryDeleteResponse) Status() string { + return ddr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ddr DirectoryDeleteResponse) ClientRequestID() string { + return ddr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (ddr DirectoryDeleteResponse) Date() time.Time { + s := ddr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// Marker returns the value for header x-ms-continuation. +func (ddr DirectoryDeleteResponse) Marker() string { + return ddr.rawResponse.Header.Get("x-ms-continuation") +} + +// RequestID returns the value for header x-ms-request-id. +func (ddr DirectoryDeleteResponse) RequestID() string { + return ddr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (ddr DirectoryDeleteResponse) Version() string { + return ddr.rawResponse.Header.Get("x-ms-version") +} + +// DirectoryGetAccessControlResponse ... +type DirectoryGetAccessControlResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (dgacr DirectoryGetAccessControlResponse) Response() *http.Response { + return dgacr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (dgacr DirectoryGetAccessControlResponse) StatusCode() int { + return dgacr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (dgacr DirectoryGetAccessControlResponse) Status() string { + return dgacr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (dgacr DirectoryGetAccessControlResponse) ClientRequestID() string { + return dgacr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (dgacr DirectoryGetAccessControlResponse) Date() time.Time { + s := dgacr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ETag returns the value for header ETag. +func (dgacr DirectoryGetAccessControlResponse) ETag() ETag { + return ETag(dgacr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (dgacr DirectoryGetAccessControlResponse) LastModified() time.Time { + s := dgacr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (dgacr DirectoryGetAccessControlResponse) RequestID() string { + return dgacr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (dgacr DirectoryGetAccessControlResponse) Version() string { + return dgacr.rawResponse.Header.Get("x-ms-version") +} + +// XMsACL returns the value for header x-ms-acl. +func (dgacr DirectoryGetAccessControlResponse) XMsACL() string { + return dgacr.rawResponse.Header.Get("x-ms-acl") +} + +// XMsGroup returns the value for header x-ms-group. +func (dgacr DirectoryGetAccessControlResponse) XMsGroup() string { + return dgacr.rawResponse.Header.Get("x-ms-group") +} + +// XMsOwner returns the value for header x-ms-owner. +func (dgacr DirectoryGetAccessControlResponse) XMsOwner() string { + return dgacr.rawResponse.Header.Get("x-ms-owner") +} + +// XMsPermissions returns the value for header x-ms-permissions. +func (dgacr DirectoryGetAccessControlResponse) XMsPermissions() string { + return dgacr.rawResponse.Header.Get("x-ms-permissions") +} + +// DirectoryRenameResponse ... +type DirectoryRenameResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (drr DirectoryRenameResponse) Response() *http.Response { + return drr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (drr DirectoryRenameResponse) StatusCode() int { + return drr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (drr DirectoryRenameResponse) Status() string { + return drr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (drr DirectoryRenameResponse) ClientRequestID() string { + return drr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentLength returns the value for header Content-Length. +func (drr DirectoryRenameResponse) ContentLength() int64 { + s := drr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Date returns the value for header Date. +func (drr DirectoryRenameResponse) Date() time.Time { + s := drr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ETag returns the value for header ETag. +func (drr DirectoryRenameResponse) ETag() ETag { + return ETag(drr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (drr DirectoryRenameResponse) LastModified() time.Time { + s := drr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// Marker returns the value for header x-ms-continuation. +func (drr DirectoryRenameResponse) Marker() string { + return drr.rawResponse.Header.Get("x-ms-continuation") +} + +// RequestID returns the value for header x-ms-request-id. +func (drr DirectoryRenameResponse) RequestID() string { + return drr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (drr DirectoryRenameResponse) Version() string { + return drr.rawResponse.Header.Get("x-ms-version") +} + +// DirectorySetAccessControlResponse ... +type DirectorySetAccessControlResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (dsacr DirectorySetAccessControlResponse) Response() *http.Response { + return dsacr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (dsacr DirectorySetAccessControlResponse) StatusCode() int { + return dsacr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (dsacr DirectorySetAccessControlResponse) Status() string { + return dsacr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (dsacr DirectorySetAccessControlResponse) ClientRequestID() string { + return dsacr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (dsacr DirectorySetAccessControlResponse) Date() time.Time { + s := dsacr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ETag returns the value for header ETag. +func (dsacr DirectorySetAccessControlResponse) ETag() ETag { + return ETag(dsacr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (dsacr DirectorySetAccessControlResponse) LastModified() time.Time { + s := dsacr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. 
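+// x-ms-request-id uniquely identifies the request on the service side; it is
+// the value to quote when troubleshooting a call with Azure support.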
+func (dsacr DirectorySetAccessControlResponse) RequestID() string { + return dsacr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (dsacr DirectorySetAccessControlResponse) Version() string { + return dsacr.rawResponse.Header.Get("x-ms-version") +} + // downloadResponse - Wraps the response from the blobClient.Download method. type downloadResponse struct { rawResponse *http.Response @@ -3612,6 +5199,24 @@ func (dr downloadResponse) CacheControl() string { return dr.rawResponse.Header.Get("Cache-Control") } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (dr downloadResponse) ClientRequestID() string { + return dr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentCrc64 returns the value for header x-ms-content-crc64. +func (dr downloadResponse) ContentCrc64() []byte { + s := dr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // ContentDisposition returns the value for header Content-Disposition. func (dr downloadResponse) ContentDisposition() string { return dr.rawResponse.Header.Get("Content-Disposition") @@ -3714,6 +5319,16 @@ func (dr downloadResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (dr downloadResponse) EncryptionKeySha256() string { + return dr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (dr downloadResponse) EncryptionScope() string { + return dr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (dr downloadResponse) ErrorCode() string { return dr.rawResponse.Header.Get("x-ms-error-code") @@ -3724,6 +5339,11 @@ func (dr downloadResponse) ETag() ETag { return ETag(dr.rawResponse.Header.Get("ETag")) } +// IsSealed returns the value for header x-ms-blob-sealed. +func (dr downloadResponse) IsSealed() string { + return dr.rawResponse.Header.Get("x-ms-blob-sealed") +} + // IsServerEncrypted returns the value for header x-ms-server-encrypted. func (dr downloadResponse) IsServerEncrypted() string { return dr.rawResponse.Header.Get("x-ms-server-encrypted") @@ -3757,16 +5377,112 @@ func (dr downloadResponse) LeaseStatus() LeaseStatusType { return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status")) } +// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id. +func (dr downloadResponse) ObjectReplicationPolicyID() string { + return dr.rawResponse.Header.Get("x-ms-or-policy-id") +} + +// ObjectReplicationRules returns the value for header x-ms-or. +func (dr downloadResponse) ObjectReplicationRules() string { + return dr.rawResponse.Header.Get("x-ms-or") +} + // RequestID returns the value for header x-ms-request-id. func (dr downloadResponse) RequestID() string { return dr.rawResponse.Header.Get("x-ms-request-id") } +// TagCount returns the value for header x-ms-tag-count. +func (dr downloadResponse) TagCount() int64 { + s := dr.rawResponse.Header.Get("x-ms-tag-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + // Version returns the value for header x-ms-version. func (dr downloadResponse) Version() string { return dr.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. 
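+// The header is only returned for storage accounts with blob versioning
+// enabled; otherwise the accessor yields an empty string.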
+func (dr downloadResponse) VersionID() string { + return dr.rawResponse.Header.Get("x-ms-version-id") +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blob"` + Name string `xml:"Name"` + ContainerName string `xml:"ContainerName"` + TagValue string `xml:"TagValue"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + Where string `xml:"Where"` + Blobs []FilterBlobItem `xml:"Blobs>Blob"` + NextMarker *string `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (fbs FilterBlobSegment) Response() *http.Response { + return fbs.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (fbs FilterBlobSegment) StatusCode() int { + return fbs.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (fbs FilterBlobSegment) Status() string { + return fbs.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (fbs FilterBlobSegment) ClientRequestID() string { + return fbs.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (fbs FilterBlobSegment) Date() time.Time { + s := fbs.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (fbs FilterBlobSegment) ErrorCode() string { + return fbs.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (fbs FilterBlobSegment) RequestID() string { + return fbs.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (fbs FilterBlobSegment) Version() string { + return fbs.rawResponse.Header.Get("x-ms-version") +} + // GeoReplication - Geo-Replication information for the Secondary Storage Service type GeoReplication struct { // Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone' @@ -3787,6 +5503,14 @@ func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) e return d.DecodeElement(gr2, &start) } +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"JsonTextConfiguration"` + // RecordSeparator - record separator + RecordSeparator string `xml:"RecordSeparator"` +} + // KeyInfo - Key information type KeyInfo struct { // Start - The date-time the key is active in ISO 8601 UTC time @@ -3795,14 +5519,6 @@ type KeyInfo struct { Expiry string `xml:"Expiry"` } -//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion -func NewKeyInfo(Start, Expiry time.Time) KeyInfo { - return KeyInfo{ - Start: Start.UTC().Format(SASTimeFormat), - Expiry: Expiry.UTC().Format(SASTimeFormat), - } -} - // ListBlobsFlatSegmentResponse - An enumeration of blobs type ListBlobsFlatSegmentResponse struct { rawResponse *http.Response @@ -3813,7 +5529,6 @@ type ListBlobsFlatSegmentResponse struct { Prefix *string `xml:"Prefix"` Marker *string `xml:"Marker"` MaxResults *int32 `xml:"MaxResults"` - Delimiter *string `xml:"Delimiter"` Segment BlobFlatListSegment `xml:"Blobs"` NextMarker Marker `xml:"NextMarker"` } @@ -3833,6 +5548,11 @@ func (lbfsr ListBlobsFlatSegmentResponse) Status() string { return lbfsr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lbfsr ListBlobsFlatSegmentResponse) ClientRequestID() string { + return lbfsr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentType returns the value for header Content-Type. func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string { return lbfsr.rawResponse.Header.Get("Content-Type") @@ -3896,6 +5616,11 @@ func (lbhsr ListBlobsHierarchySegmentResponse) Status() string { return lbhsr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lbhsr ListBlobsHierarchySegmentResponse) ClientRequestID() string { + return lbhsr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentType returns the value for header Content-Type. func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string { return lbhsr.rawResponse.Header.Get("Content-Type") @@ -3957,6 +5682,11 @@ func (lcsr ListContainersSegmentResponse) Status() string { return lcsr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lcsr ListContainersSegmentResponse) ClientRequestID() string { + return lcsr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ErrorCode returns the value for header x-ms-error-code. func (lcsr ListContainersSegmentResponse) ErrorCode() string { return lcsr.rawResponse.Header.Get("x-ms-error-code") @@ -4029,6 +5759,11 @@ func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 { return i } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcpr PageBlobClearPagesResponse) ClientRequestID() string { + return pbcpr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte { s := pbcpr.rawResponse.Header.Get("Content-MD5") @@ -4088,6 +5823,19 @@ func (pbcpr PageBlobClearPagesResponse) Version() string { return pbcpr.rawResponse.Header.Get("x-ms-version") } +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbcpr PageBlobClearPagesResponse) XMsContentCrc64() []byte { + s := pbcpr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // PageBlobCopyIncrementalResponse ... 
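The hunk above also deletes the NewKeyInfo convenience constructor. Callers that built KeyInfo values through it can inline the same conversion; a sketch of the equivalent, reusing the exported SASTimeFormat constant from this package (the helper name is hypothetical):

// keyInfoFor reproduces the removed NewKeyInfo: both timestamps are
// normalized to UTC and formatted as SAS times before marshalling.
func keyInfoFor(start, expiry time.Time) KeyInfo {
	return KeyInfo{
		Start:  start.UTC().Format(SASTimeFormat),
		Expiry: expiry.UTC().Format(SASTimeFormat),
	}
}
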
type PageBlobCopyIncrementalResponse struct { rawResponse *http.Response @@ -4108,6 +5856,11 @@ func (pbcir PageBlobCopyIncrementalResponse) Status() string { return pbcir.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcir PageBlobCopyIncrementalResponse) ClientRequestID() string { + return pbcir.rawResponse.Header.Get("x-ms-client-request-id") +} + // CopyID returns the value for header x-ms-copy-id. func (pbcir PageBlobCopyIncrementalResponse) CopyID() string { return pbcir.rawResponse.Header.Get("x-ms-copy-id") @@ -4184,6 +5937,11 @@ func (pbcr PageBlobCreateResponse) Status() string { return pbcr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcr PageBlobCreateResponse) ClientRequestID() string { + return pbcr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (pbcr PageBlobCreateResponse) ContentMD5() []byte { s := pbcr.rawResponse.Header.Get("Content-MD5") @@ -4210,6 +5968,16 @@ func (pbcr PageBlobCreateResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbcr PageBlobCreateResponse) EncryptionKeySha256() string { + return pbcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (pbcr PageBlobCreateResponse) EncryptionScope() string { + return pbcr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (pbcr PageBlobCreateResponse) ErrorCode() string { return pbcr.rawResponse.Header.Get("x-ms-error-code") @@ -4248,6 +6016,11 @@ func (pbcr PageBlobCreateResponse) Version() string { return pbcr.rawResponse.Header.Get("x-ms-version") } +// VersionID returns the value for header x-ms-version-id. +func (pbcr PageBlobCreateResponse) VersionID() string { + return pbcr.rawResponse.Header.Get("x-ms-version-id") +} + // PageBlobResizeResponse ... type PageBlobResizeResponse struct { rawResponse *http.Response @@ -4281,6 +6054,11 @@ func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 { return i } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbrr PageBlobResizeResponse) ClientRequestID() string { + return pbrr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (pbrr PageBlobResizeResponse) Date() time.Time { s := pbrr.rawResponse.Header.Get("Date") @@ -4360,6 +6138,11 @@ func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 { return i } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ClientRequestID() string { + return pbusnr.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time { s := pbusnr.rawResponse.Header.Get("Date") @@ -4465,6 +6248,16 @@ func (pbupfur PageBlobUploadPagesFromURLResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionKeySha256() string { + return pbupfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. 
+func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionScope() string { + return pbupfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (pbupfur PageBlobUploadPagesFromURLResponse) ErrorCode() string { return pbupfur.rawResponse.Header.Get("x-ms-error-code") @@ -4503,6 +6296,19 @@ func (pbupfur PageBlobUploadPagesFromURLResponse) Version() string { return pbupfur.rawResponse.Header.Get("x-ms-version") } +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbupfur PageBlobUploadPagesFromURLResponse) XMsContentCrc64() []byte { + s := pbupfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // PageBlobUploadPagesResponse ... type PageBlobUploadPagesResponse struct { rawResponse *http.Response @@ -4536,6 +6342,11 @@ func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 { return i } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbupr PageBlobUploadPagesResponse) ClientRequestID() string { + return pbupr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ContentMD5 returns the value for header Content-MD5. func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte { s := pbupr.rawResponse.Header.Get("Content-MD5") @@ -4562,6 +6373,16 @@ func (pbupr PageBlobUploadPagesResponse) Date() time.Time { return t } +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbupr PageBlobUploadPagesResponse) EncryptionKeySha256() string { + return pbupr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (pbupr PageBlobUploadPagesResponse) EncryptionScope() string { + return pbupr.rawResponse.Header.Get("x-ms-encryption-scope") +} + // ErrorCode returns the value for header x-ms-error-code. func (pbupr PageBlobUploadPagesResponse) ErrorCode() string { return pbupr.rawResponse.Header.Get("x-ms-error-code") @@ -4600,6 +6421,19 @@ func (pbupr PageBlobUploadPagesResponse) Version() string { return pbupr.rawResponse.Header.Get("x-ms-version") } +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbupr PageBlobUploadPagesResponse) XMsContentCrc64() []byte { + s := pbupr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + // PageList - the list of pages type PageList struct { rawResponse *http.Response @@ -4635,6 +6469,11 @@ func (pl PageList) BlobContentLength() int64 { return i } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pl PageList) ClientRequestID() string { + return pl.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (pl PageList) Date() time.Time { s := pl.rawResponse.Header.Get("Date") @@ -4687,6 +6526,304 @@ type PageRange struct { End int64 `xml:"End"` } +// QueryFormat ... 
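PageList, whose accessors close the hunk above, pairs response headers with the parsed ranges. A hedged sketch of consuming one, assuming the generated PageRange slice field on PageList (present in this file's model, though outside this hunk); totalPageBytes is a hypothetical helper:

// totalPageBytes sums the bytes covered by a GetPageRanges result. Start
// and End are inclusive offsets, so each range spans End-Start+1 bytes.
func totalPageBytes(pl *PageList) int64 {
	var total int64
	for _, pr := range pl.PageRange {
		total += pr.End - pr.Start + 1
	}
	return total
}
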
+type QueryFormat struct { + // Type - Possible values include: 'QueryFormatDelimited', 'QueryFormatJSON', 'QueryFormatNone' + Type QueryFormatType `xml:"Type"` + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` +} + +// QueryRequest - the quick query body +type QueryRequest struct { + // QueryType - the query type + QueryType string `xml:"QueryType"` + // Expression - a query statement + Expression string `xml:"Expression"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +// QueryResponse - Wraps the response from the blobClient.Query method. +type QueryResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (qr QueryResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range qr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (qr QueryResponse) Response() *http.Response { + return qr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (qr QueryResponse) StatusCode() int { + return qr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (qr QueryResponse) Status() string { + return qr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (qr QueryResponse) Body() io.ReadCloser { + return qr.rawResponse.Body +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (qr QueryResponse) AcceptRanges() string { + return qr.rawResponse.Header.Get("Accept-Ranges") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (qr QueryResponse) BlobCommittedBlockCount() int32 { + s := qr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (qr QueryResponse) BlobContentMD5() []byte { + s := qr.rawResponse.Header.Get("x-ms-blob-content-md5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (qr QueryResponse) BlobSequenceNumber() int64 { + s := qr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (qr QueryResponse) BlobType() BlobType { + return BlobType(qr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (qr QueryResponse) CacheControl() string { + return qr.rawResponse.Header.Get("Cache-Control") +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (qr QueryResponse) ClientRequestID() string { + return qr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentCrc64 returns the value for header x-ms-content-crc64. 
+func (qr QueryResponse) ContentCrc64() []byte { + s := qr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentDisposition returns the value for header Content-Disposition. +func (qr QueryResponse) ContentDisposition() string { + return qr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (qr QueryResponse) ContentEncoding() string { + return qr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (qr QueryResponse) ContentLanguage() string { + return qr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (qr QueryResponse) ContentLength() int64 { + s := qr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (qr QueryResponse) ContentMD5() []byte { + s := qr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentRange returns the value for header Content-Range. +func (qr QueryResponse) ContentRange() string { + return qr.rawResponse.Header.Get("Content-Range") +} + +// ContentType returns the value for header Content-Type. +func (qr QueryResponse) ContentType() string { + return qr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (qr QueryResponse) CopyCompletionTime() time.Time { + s := qr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (qr QueryResponse) CopyID() string { + return qr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (qr QueryResponse) CopyProgress() string { + return qr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (qr QueryResponse) CopySource() string { + return qr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (qr QueryResponse) CopyStatus() CopyStatusType { + return CopyStatusType(qr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (qr QueryResponse) CopyStatusDescription() string { + return qr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. +func (qr QueryResponse) Date() time.Time { + s := qr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (qr QueryResponse) EncryptionKeySha256() string { + return qr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. 
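QueryResponse is the one wrapper in this hunk that exposes Body(), since quick-query results stream back in the response body rather than in headers, and the caller owns draining and closing that stream. A short sketch; readQueryResult is a hypothetical helper, and io/ioutil matches the vintage of this code:

// readQueryResult drains and closes the quick-query result stream.
func readQueryResult(qr QueryResponse) ([]byte, error) {
	body := qr.Body()
	defer body.Close()
	return ioutil.ReadAll(body)
}
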
+func (qr QueryResponse) EncryptionScope() string { + return qr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (qr QueryResponse) ErrorCode() string { + return qr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (qr QueryResponse) ETag() ETag { + return ETag(qr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (qr QueryResponse) IsServerEncrypted() string { + return qr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (qr QueryResponse) LastModified() time.Time { + s := qr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (qr QueryResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(qr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (qr QueryResponse) LeaseState() LeaseStateType { + return LeaseStateType(qr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (qr QueryResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(qr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (qr QueryResponse) RequestID() string { + return qr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (qr QueryResponse) Version() string { + return qr.rawResponse.Header.Get("x-ms-version") +} + +// QuerySerialization ... +type QuerySerialization struct { + Format QueryFormat `xml:"Format"` +} + // RetentionPolicy - the retention policy which determines how long the associated data should persist type RetentionPolicy struct { // Enabled - Indicates whether a retention policy is enabled for the storage service @@ -4720,6 +6857,11 @@ func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType { return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind")) } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (sgair ServiceGetAccountInfoResponse) ClientRequestID() string { + return sgair.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (sgair ServiceGetAccountInfoResponse) Date() time.Time { s := sgair.rawResponse.Header.Get("Date") @@ -4773,6 +6915,11 @@ func (sspr ServiceSetPropertiesResponse) Status() string { return sspr.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (sspr ServiceSetPropertiesResponse) ClientRequestID() string { + return sspr.rawResponse.Header.Get("x-ms-client-request-id") +} + // ErrorCode returns the value for header x-ms-error-code. func (sspr ServiceSetPropertiesResponse) ErrorCode() string { return sspr.rawResponse.Header.Get("x-ms-error-code") @@ -4821,6 +6968,11 @@ func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType { return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access")) } +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (si SignedIdentifiers) ClientRequestID() string { + return si.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (si SignedIdentifiers) Date() time.Time { s := si.rawResponse.Header.Get("Date") @@ -4875,6 +7027,8 @@ type StaticWebsite struct { IndexDocument *string `xml:"IndexDocument"` // ErrorDocument404Path - The absolute path of the custom 404 page ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + // DefaultIndexDocumentPath - Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` } // StorageServiceProperties - Storage Service Properties. @@ -4906,6 +7060,11 @@ func (ssp StorageServiceProperties) Status() string { return ssp.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ssp StorageServiceProperties) ClientRequestID() string { + return ssp.rawResponse.Header.Get("x-ms-client-request-id") +} + // ErrorCode returns the value for header x-ms-error-code. func (ssp StorageServiceProperties) ErrorCode() string { return ssp.rawResponse.Header.Get("x-ms-error-code") @@ -4942,6 +7101,11 @@ func (sss StorageServiceStats) Status() string { return sss.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (sss StorageServiceStats) ClientRequestID() string { + return sss.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (sss StorageServiceStats) Date() time.Time { s := sss.rawResponse.Header.Get("Date") @@ -4970,6 +7134,51 @@ func (sss StorageServiceStats) Version() string { return sss.rawResponse.Header.Get("x-ms-version") } +// SubmitBatchResponse - Wraps the response from the serviceClient.SubmitBatch method. +type SubmitBatchResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sbr SubmitBatchResponse) Response() *http.Response { + return sbr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sbr SubmitBatchResponse) StatusCode() int { + return sbr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sbr SubmitBatchResponse) Status() string { + return sbr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (sbr SubmitBatchResponse) Body() io.ReadCloser { + return sbr.rawResponse.Body +} + +// ContentType returns the value for header Content-Type. +func (sbr SubmitBatchResponse) ContentType() string { + return sbr.rawResponse.Header.Get("Content-Type") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sbr SubmitBatchResponse) ErrorCode() string { + return sbr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sbr SubmitBatchResponse) RequestID() string { + return sbr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
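StaticWebsite, a few hunks up, gains DefaultIndexDocumentPath: an absolute path served as the default index page, which is how single-page apps are typically hosted. A sketch of populating it; the Enabled field comes from the existing model outside this hunk:

// staticSiteProps builds a StaticWebsite payload that routes requests to a
// single page. DefaultIndexDocumentPath is the field added by this patch.
func staticSiteProps() StaticWebsite {
	path := "index.html"
	return StaticWebsite{
		Enabled:                  true,
		DefaultIndexDocumentPath: &path,
	}
}
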
+func (sbr SubmitBatchResponse) Version() string { + return sbr.rawResponse.Header.Get("x-ms-version") +} + // UserDelegationKey - A user delegation key type UserDelegationKey struct { rawResponse *http.Response @@ -4989,13 +7198,6 @@ type UserDelegationKey struct { Value string `xml:"Value"` } -func (udk UserDelegationKey) ComputeHMACSHA256(message string) (base64String string) { - bytes, _ := base64.StdEncoding.DecodeString(udk.Value) - h := hmac.New(sha256.New, bytes) - h.Write([]byte(message)) - return base64.StdEncoding.EncodeToString(h.Sum(nil)) -} - // MarshalXML implements the xml.Marshaler interface for UserDelegationKey. func (udk UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { udk2 := (*userDelegationKey)(unsafe.Pointer(&udk)) @@ -5023,6 +7225,11 @@ func (udk UserDelegationKey) Status() string { return udk.rawResponse.Status } +// ClientRequestID returns the value for header x-ms-client-request-id. +func (udk UserDelegationKey) ClientRequestID() string { + return udk.rawResponse.Header.Get("x-ms-client-request-id") +} + // Date returns the value for header Date. func (udk UserDelegationKey) Date() time.Time { s := udk.rawResponse.Header.Get("Date") @@ -5142,57 +7349,67 @@ type userDelegationKey struct { // internal type used for marshalling type accessPolicy struct { - Start timeRFC3339 `xml:"Start"` - Expiry timeRFC3339 `xml:"Expiry"` - Permission string `xml:"Permission"` + Start *timeRFC3339 `xml:"Start"` + Expiry *timeRFC3339 `xml:"Expiry"` + Permission *string `xml:"Permission"` } // internal type used for marshalling type blobProperties struct { // XMLName is used for marshalling and is subject to removal in a future release. - XMLName xml.Name `xml:"Properties"` - CreationTime *timeRFC1123 `xml:"Creation-Time"` - LastModified timeRFC1123 `xml:"Last-Modified"` - Etag ETag `xml:"Etag"` - ContentLength *int64 `xml:"Content-Length"` - ContentType *string `xml:"Content-Type"` - ContentEncoding *string `xml:"Content-Encoding"` - ContentLanguage *string `xml:"Content-Language"` - ContentMD5 base64Encoded `xml:"Content-MD5"` - ContentDisposition *string `xml:"Content-Disposition"` - CacheControl *string `xml:"Cache-Control"` - BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` - BlobType BlobType `xml:"BlobType"` - LeaseStatus LeaseStatusType `xml:"LeaseStatus"` - LeaseState LeaseStateType `xml:"LeaseState"` - LeaseDuration LeaseDurationType `xml:"LeaseDuration"` - CopyID *string `xml:"CopyId"` - CopyStatus CopyStatusType `xml:"CopyStatus"` - CopySource *string `xml:"CopySource"` - CopyProgress *string `xml:"CopyProgress"` - CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` - CopyStatusDescription *string `xml:"CopyStatusDescription"` - ServerEncrypted *bool `xml:"ServerEncrypted"` - IncrementalCopy *bool `xml:"IncrementalCopy"` - DestinationSnapshot *string `xml:"DestinationSnapshot"` - DeletedTime *timeRFC1123 `xml:"DeletedTime"` - RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` - AccessTier AccessTierType `xml:"AccessTier"` - AccessTierInferred *bool `xml:"AccessTierInferred"` - ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` - AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + XMLName xml.Name `xml:"Properties"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string 
`xml:"Content-Language"` + ContentMD5 base64Encoded `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType BlobType `xml:"BlobType"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` + CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"` + EncryptionScope *string `xml:"EncryptionScope"` + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + TagCount *int32 `xml:"TagCount"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + IsSealed *bool `xml:"IsSealed"` + RehydratePriority RehydratePriorityType `xml:"RehydratePriority"` } // internal type used for marshalling type containerProperties struct { - LastModified timeRFC1123 `xml:"Last-Modified"` - Etag ETag `xml:"Etag"` - LeaseStatus LeaseStatusType `xml:"LeaseStatus"` - LeaseState LeaseStateType `xml:"LeaseState"` - LeaseDuration LeaseDurationType `xml:"LeaseDuration"` - PublicAccess PublicAccessType `xml:"PublicAccess"` - HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` - HasLegalHold *bool `xml:"HasLegalHold"` + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + PublicAccess PublicAccessType `xml:"PublicAccess"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` } // internal type used for marshalling diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go index 42e27da..b55ae12 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go @@ -33,6 +33,14 @@ func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { // href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations">Setting // Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// encryptionKey is optional. 
Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. // ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number // less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob // if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate @@ -42,14 +50,14 @@ func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs // without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is // recorded in the analytics logs when storage analytics logging is enabled. -func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) { +func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobClearPagesResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } @@ -61,7 +69,7 @@ func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64 } // clearPagesPreparer 
prepares the ClearPages request. -func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -79,6 +87,18 @@ func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *in if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if ifSequenceNumberLessThanOrEqualTo != nil { req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) } @@ -202,35 +222,45 @@ func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (p // blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is // expressed in seconds. For more information, see Setting -// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, -// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the -// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. -// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the -// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this -// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. -// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and -// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the -// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the -// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified -// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, -// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and -// Metadata for more information. 
leaseID is if specified, the operation only succeeds if the resource's lease is -// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. -// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified -// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified -// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. blobSequenceNumber is set -// for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of -// the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value with a 1 -// KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (*PageBlobCreateResponse, error) { +// Timeouts for Blob Service Operations. tier is optional. Indicates the tier to be set on the page blob. +// blobContentType is optional. Sets the blob's content type. If specified, this property is stored with the blob and +// returned with a read request. blobContentEncoding is optional. Sets the blob's content encoding. If specified, this +// property is stored with the blob and returned with a read request. blobContentLanguage is optional. Set the blob's +// content language. If specified, this property is stored with the blob and returned with a read request. +// blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for +// the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets the blob's cache +// control. If specified, this property is stored with the blob and returned with a read request. metadata is optional. +// Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the +// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value +// pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from +// the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules +// for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. leaseID is if +// specified, the operation only succeeds if the resource's lease is active and matches this ID. blobContentDisposition +// is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies the encryption key to +// use to encrypt the data provided in the request. If not specified, encryption is performed with the root account +// encryption key. For more information, see Encryption at Rest for Azure Storage Services. 
encryptionKeySha256 is the +// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. +// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is +// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version +// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the +// request. If not specified, encryption is performed with the default account encryption scope. For more information, +// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can +// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations. +func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string) (*PageBlobCreateResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.createPreparer(contentLength, blobContentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, blobSequenceNumber, requestID) + req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID, blobTagsString) if err != nil { return nil, err } @@ -242,7 +272,7 @@ func (client pageBlobClient) Create(ctx context.Context, contentLength int64, bl } // createPreparer prepares the Create request. 
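The Create signature above grows from 17 parameters to 24 (access tier, the customer-provided-key trio, encryption scope, ifTags, blobTagsString), and existing call sites migrate by padding with nil and the typed None sentinels. An illustrative in-package sketch with placeholder values, not a prescribed usage:

// createEmptyPageBlob provisions a minimal 512-byte page blob with every
// new optional parameter left at its absent sentinel.
func createEmptyPageBlob(ctx context.Context, client pageBlobClient) (*PageBlobCreateResponse, error) {
	return client.Create(ctx,
		0,   // contentLength: no request body
		512, // blobContentLength: must stay 512-byte aligned
		nil, // timeout
		PremiumPageBlobAccessTierNone,          // tier (new)
		nil, nil, nil, nil, nil,                // blobContentType, -Encoding, -Language, -MD5, blobCacheControl
		nil,                                    // metadata
		nil, nil,                               // leaseID, blobContentDisposition
		nil, nil, EncryptionAlgorithmNone, nil, // encryptionKey, key SHA-256, algorithm, encryptionScope (new)
		nil, nil, nil, nil,                     // ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch
		nil,                                    // ifTags (new)
		nil, nil,                               // blobSequenceNumber, requestID
		nil)                                    // blobTagsString (new)
}
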
-func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { +func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -253,6 +283,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng } req.URL.RawQuery = params.Encode() req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if tier != PremiumPageBlobAccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } if blobContentType != nil { req.Header.Set("x-ms-blob-content-type", *blobContentType) } @@ -279,6 +312,18 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng if blobContentDisposition != nil { req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -291,6 +336,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) if blobSequenceNumber != nil { req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) @@ -299,6 +347,9 @@ func (client pageBlobClient) createPreparer(contentLength int64, blobContentLeng if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } req.Header.Set("x-ms-blob-type", "PageBlob") return req, nil } @@ -327,17 +378,18 @@ func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.R // ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified // date/time. 
ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified // since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. -// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides a -// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. +func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -349,7 +401,7 @@ func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string } // getPageRangesPreparer prepares the GetPageRanges request. -func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -381,6 +433,9 @@ func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *in if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -425,22 +480,25 @@ func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pip // parameter is a DateTime value that specifies that the response will contain only pages that were changed between // target blob and previous snapshot. Changed pages include both updated and cleared pages. 
The target blob may be a // snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots -// are currently supported only for blobs created on or after January 1, 2016. rangeParameter is return only the bytes -// of the blob in the specified range. leaseID is if specified, the operation only succeeds if the resource's lease is -// active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it has been -// modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if -// it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs -// with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. -func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageList, error) { +// are currently supported only for blobs created on or after January 1, 2016. prevSnapshotURL is optional. This header +// is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the +// target blob. The response will only contain pages that were changed between the target blob and its previous +// snapshot. rangeParameter is return only the bytes of the blob in the specified range. leaseID is if specified, the +// operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is +// specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag +// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to +// operate only on blobs with a matching value. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, prevSnapshotURL, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -452,7 +510,7 @@ func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *st } // getPageRangesDiffPreparer prepares the GetPageRangesDiff request. -func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -469,6 +527,9 @@ func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout } params.Set("comp", "pagelist") req.URL.RawQuery = params.Encode() + if prevSnapshotURL != nil { + req.Header.Set("x-ms-previous-snapshot-url", *prevSnapshotURL) + } if rangeParameter != nil { req.Header.Set("x-ms-range", *rangeParameter) } @@ -487,6 +548,9 @@ func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -526,20 +590,28 @@ func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) // see Setting // Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it -// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a -// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on -// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. -// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics -// logs when storage analytics logging is enabled. 
-func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) { +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides +// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobResizeResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.resizePreparer(blobContentLength, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) if err != nil { return nil, err } @@ -551,7 +623,7 @@ func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64 } // resizePreparer prepares the Resize request. 
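The encryption parameters documented above surface as x-ms-encryption-* headers in the preparer that follows. Below is a minimal, stdlib-only sketch of how a caller might derive those header values; the account URL is a placeholder and the randomly generated key stands in for a caller-managed AES-256 key, so treat this as an illustration of the wire format rather than the SDK's public API.

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/http"
)

// cpkHeaders mirrors the client-provided-key headers that resizePreparer
// emits when an encryption key is supplied.
func cpkHeaders(key []byte) map[string]string {
	digest := sha256.Sum256(key)
	return map[string]string{
		"x-ms-encryption-key":        base64.StdEncoding.EncodeToString(key),
		"x-ms-encryption-key-sha256": base64.StdEncoding.EncodeToString(digest[:]),
		// Per the doc comment above, AES256 is currently the only accepted value.
		"x-ms-encryption-algorithm": "AES256",
	}
}

func main() {
	key := make([]byte, 32) // stand-in for a caller-managed AES-256 key
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	// Placeholder URL; a real request targets the blob's resource URL.
	req, err := http.NewRequest("PUT", "https://example.blob.core.windows.net/c/b?comp=properties", nil)
	if err != nil {
		panic(err)
	}
	for k, v := range cpkHeaders(key) {
		req.Header.Set(k, v)
	}
	fmt.Println(req.Header)
}
```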
-func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -565,6 +637,18 @@ func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *in if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if ifModifiedSince != nil { req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) } @@ -682,11 +766,19 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons // // body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an // error.contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the -// body, to be validated by the service. timeout is the timeout parameter is expressed in seconds. For more -// information, see Setting // Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified // range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. // ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number // less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob // if it has a sequence number less than the specified. 
ifSequenceNumberEqualTo is specify this header value to operate @@ -694,9 +786,10 @@ func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Respons // on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to // operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value // to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs -// without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is -// recorded in the analytics logs when storage analytics logging is enabled. -func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesResponse, error) { +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobUploadPagesResponse, error) { if err := validate([]validation{ {targetValue: body, constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, @@ -705,7 +798,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, timeout, rangeParameter, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, requestID) + req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) if err != nil { return nil, err } @@ -717,7 +810,7 @@ func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker } // uploadPagesPreparer prepares the UploadPages request. 
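The preparer below base64-encodes the transactional checksums into the Content-MD5 and x-ms-content-crc64 headers. Here is a short sketch of deriving the MD5 value for a page of data; the CRC64 variant is omitted because Azure Storage uses a service-specific CRC64 polynomial that the Go standard library does not ship.

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

func main() {
	// Page blob ranges are 512-byte aligned; use a single zero page here.
	page := bytes.Repeat([]byte{0}, 512)

	// transactionalContentMD5 is the raw digest of the request body; the
	// preparer base64-encodes it into the Content-MD5 header.
	sum := md5.Sum(page)
	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(sum[:]))
}
```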
-func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, timeout *int32, rangeParameter *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, body) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -732,12 +825,27 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng if transactionalContentMD5 != nil { req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } if rangeParameter != nil { req.Header.Set("x-ms-range", *rangeParameter) } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if ifSequenceNumberLessThanOrEqualTo != nil { req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) } @@ -759,6 +867,9 @@ func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLeng if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } req.Header.Set("x-ms-version", ServiceVersion) if requestID != nil { req.Header.Set("x-ms-client-request-id", *requestID) @@ -785,32 +896,41 @@ func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipel // length of this range should match the ContentLength header and x-ms-range/Range destination range header. // contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be // written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated +// for the range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated // for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in // seconds. For more information, see Setting -// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's -// lease is active and matches this ID. 
ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only -// on a blob if it has a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this -// header value to operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo -// is specify this header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is -// specify this header value to operate only on a blob if it has been modified since the specified date/time. -// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the -// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is -// specify an ETag value to operate only on blobs without a matching value. sourceIfModifiedSince is specify this -// header value to operate only on a blob if it has been modified since the specified date/time. -// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the -// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. -// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides -// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage -// analytics logging is enabled. -func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) { +// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt +// the data provided in the request. If not specified, encryption is performed with the root account encryption key. +// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of +// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is +// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be +// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. +// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, +// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for +// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has +// a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to +// operate only on a blob if it has a sequence number less than the specified. 
ifSequenceNumberEqualTo is specify this +// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is +// specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag +// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to +// operate only on blobs with a matching value. sourceIfModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag +// value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on +// blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit +// that is recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (*PageBlobUploadPagesFromURLResponse, error) { if err := validate([]validation{ {targetValue: timeout, constraints: []constraint{{target: "timeout", name: null, rule: false, chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { return nil, err } - req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, timeout, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) + req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID) if err != nil { return nil, err } @@ -822,7 +942,7 @@ func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL s } // uploadPagesFromURLPreparer prepares the UploadPagesFromURL request. 
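Both the destination range (x-ms-range) and the source range handed to the preparer below must be 512-byte aligned with an explicit range-end, per the doc comment above. A small sketch of formatting such a range follows; the helper name is ad hoc, not part of this package.

```go
package main

import "fmt"

// pageRange renders an HTTP range of the form "bytes=start-end" and enforces
// the 512-byte alignment the page blob operations require.
func pageRange(offset, count int64) (string, error) {
	if offset%512 != 0 || count%512 != 0 {
		return "", fmt.Errorf("offset (%d) and count (%d) must be 512-byte aligned", offset, count)
	}
	// range-end is inclusive and, per the doc comment, required.
	return fmt.Sprintf("bytes=%d-%d", offset, offset+count-1), nil
}

func main() {
	r, err := pageRange(0, 1024)
	if err != nil {
		panic(err)
	}
	fmt.Println("x-ms-range:", r) // the same shape is used for x-ms-source-range
}
```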
-func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, timeout *int32, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { +func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("PUT", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -838,8 +958,23 @@ func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, source if sourceContentMD5 != nil { req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) req.Header.Set("x-ms-range", rangeParameter) + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } if leaseID != nil { req.Header.Set("x-ms-lease-id", *leaseID) } @@ -864,6 +999,9 @@ func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, source if ifNoneMatch != nil { req.Header.Set("If-None-Match", string(*ifNoneMatch)) } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } if sourceIfModifiedSince != nil { req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) } diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go index 6c896b7..daff580 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go @@ -25,6 +25,98 @@ func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient { return serviceClient{newManagementClient(url, p)} } +// FilterBlobs the Filter Blobs operation enables callers to list blobs across all containers whose tags match a given +// search expression. 
Filter blobs searches across all containers within a storage account but can be scoped within
+// the expression to a single container.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. where is filters
+// the results to return only blobs whose tags match the specified expression. marker is a string value
+// that identifies the portion of the list of containers to be returned with the next listing operation. The operation
+// returns the NextMarker value within the response body if the listing operation did not return all containers
+// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter
+// in a subsequent call to request the next page of list items. The marker value is opaque to the client. maxresults is
+// specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a
+// value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a
+// partition boundary, then the service will return a continuation token for retrieving the remainder of the results.
+// For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the
+// default of 5000.
+func (client serviceClient) FilterBlobs(ctx context.Context, timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (*FilterBlobSegment, error) {
+	if err := validate([]validation{
+		{targetValue: timeout,
+			constraints: []constraint{{target: "timeout", name: null, rule: false,
+				chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}},
+		{targetValue: maxresults,
+			constraints: []constraint{{target: "maxresults", name: null, rule: false,
+				chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil {
+		return nil, err
+	}
+	req, err := client.filterBlobsPreparer(timeout, requestID, where, marker, maxresults)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.filterBlobsResponder}, req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.(*FilterBlobSegment), err
+}
+
+// filterBlobsPreparer prepares the FilterBlobs request.
+func (client serviceClient) filterBlobsPreparer(timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (pipeline.Request, error) {
+	req, err := pipeline.NewRequest("GET", client.url, nil)
+	if err != nil {
+		return req, pipeline.NewError(err, "failed to create request")
+	}
+	params := req.URL.Query()
+	if timeout != nil {
+		params.Set("timeout", strconv.FormatInt(int64(*timeout), 10))
+	}
+	if where != nil && len(*where) > 0 {
+		params.Set("where", *where)
+	}
+	if marker != nil && len(*marker) > 0 {
+		params.Set("marker", *marker)
+	}
+	if maxresults != nil {
+		params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10))
+	}
+	params.Set("comp", "blobs")
+	req.URL.RawQuery = params.Encode()
+	req.Header.Set("x-ms-version", ServiceVersion)
+	if requestID != nil {
+		req.Header.Set("x-ms-client-request-id", *requestID)
+	}
+	return req, nil
+}
+
+// filterBlobsResponder handles the response to the FilterBlobs request.
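The preparer above assembles the comp=blobs query from the optional parameters. An equivalent stdlib sketch of the resulting request URL follows; the account URL is a placeholder and the where expression is only an illustrative blob-index-tag filter.

```go
package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	// Placeholder account URL; the preparer works against client.url.
	u, err := url.Parse("https://example.blob.core.windows.net/")
	if err != nil {
		panic(err)
	}
	params := u.Query()
	params.Set("comp", "blobs")
	// Illustrative blob-index-tag filter; see the service docs for the
	// full where-expression grammar.
	params.Set("where", `"project" = 'metahugo'`)
	params.Set("maxresults", strconv.FormatInt(100, 10))
	u.RawQuery = params.Encode()
	fmt.Println(u.String())
}
```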
+func (client serviceClient) filterBlobsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &FilterBlobSegment{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + // GetAccountInfo returns the sku name and account kind func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { req, err := client.getAccountInfoPreparer() @@ -203,7 +295,7 @@ func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipe return result, nil } -// GetUserDelegationKey retrieves a user delgation key for the Blob service. This is only a valid operation when using +// GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using // bearer token authentication. // // timeout is the timeout parameter is expressed in seconds. For more information, see Setting // Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB // character limit that is recorded in the analytics logs when storage analytics logging is enabled. -func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { +func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { if err := validate([]validation{ {targetValue: maxresults, constraints: []constraint{{target: "maxresults", name: null, rule: false, @@ -322,7 +414,7 @@ func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *s } // listContainersSegmentPreparer prepares the ListContainersSegment request. 
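ListContainersSegment now accepts a slice of include values, which the preparer below joins into a single comma-separated query parameter via joinConst. A stand-in sketch of that conversion, with an illustrative string-backed type standing in for the SDK's enum:

```go
package main

import (
	"fmt"
	"strings"
)

// listContainersIncludeType stands in for the SDK's string-backed enum.
type listContainersIncludeType string

// joinValues shows what a helper like joinConst has to do: render each
// enum value as a string and comma-separate the result.
func joinValues(include []listContainersIncludeType, sep string) string {
	s := make([]string, len(include))
	for i, v := range include {
		s[i] = string(v)
	}
	return strings.Join(s, sep)
}

func main() {
	include := []listContainersIncludeType{"metadata", "deleted"}
	fmt.Println("include=" + joinValues(include, ",")) // include=metadata,deleted
}
```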
-func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { +func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { req, err := pipeline.NewRequest("GET", client.url, nil) if err != nil { return req, pipeline.NewError(err, "failed to create request") @@ -337,8 +429,8 @@ func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker if maxresults != nil { params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) } - if include != ListContainersIncludeNone { - params.Set("include", string(include)) + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) } if timeout != nil { params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) @@ -465,3 +557,62 @@ func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipe resp.Response().Body.Close() return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err } + +// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be +// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SubmitBatchResponse), err +} + +// submitBatchPreparer prepares the SubmitBatch request. 
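Constructing the multipart/mixed payload is left to the caller; the preparer below only forwards multipartContentType and Content-Length. A rough stdlib sketch of producing a compatible content type and body follows; the embedded subrequest is a placeholder, not a complete batch operation.

```go
package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/textproto"
)

func main() {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)

	// Each batched operation travels as one application/http part.
	h := textproto.MIMEHeader{}
	h.Set("Content-Type", "application/http")
	h.Set("Content-Transfer-Encoding", "binary")
	part, err := w.CreatePart(h)
	if err != nil {
		panic(err)
	}
	// Placeholder subrequest; a real batch serializes a full HTTP/1.1
	// request (for example, a DELETE against a blob URL) here.
	fmt.Fprint(part, "DELETE /container/blob HTTP/1.1\r\n\r\n")
	if err := w.Close(); err != nil {
		panic(err)
	}

	// This is the multipartContentType argument SubmitBatch expects:
	// multipart/mixed with the writer's batch boundary.
	fmt.Println("Content-Type: multipart/mixed; boundary=" + w.Boundary())
	fmt.Println("Content-Length:", body.Len())
}
```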
+func (client serviceClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("POST", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "batch") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("Content-Type", multipartContentType) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// submitBatchResponder handles the response to the SubmitBatch request. +func (client serviceClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + return &SubmitBatchResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go index 4b49c18..200b2f5 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go @@ -5,7 +5,7 @@ package azblob // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/0.0.0 azblob/2018-11-09" + return "Azure-SDK-For-Go/0.0.0 azblob/2019-12-12" } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go index 8c7f594..5c086c5 100644 --- a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go @@ -45,7 +45,7 @@ func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders { /////////////////////////////////////////////////////////////////////////////// -// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry. +// DownloadResponse wraps AutoRest generated DownloadResponse and helps to provide info for retry. type DownloadResponse struct { r *downloadResponse ctx context.Context @@ -63,11 +63,9 @@ func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser { } return NewRetryReader(r.ctx, r.Response(), r.getInfo, o, func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { - resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, - BlobAccessConditions{ - ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag}, - }, - false) + resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, BlobAccessConditions{ + ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag}, + }, false, o.ClientProvidedKeyOptions) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore new file mode 100644 index 0000000..3350aaf --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -0,0 +1,32 @@ +# The standard Go .gitignore file follows. 
(Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.idea/ +.vscode/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md new file mode 100644 index 0000000..d1f596b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -0,0 +1,1004 @@ +# CHANGELOG + +## v14.2.0 + +- Added package comment to make `github.com/Azure/go-autorest` importable. + +## v14.1.1 + +### Bug Fixes + +- Change `x-ms-authorization-auxiliary` header value separator to comma. + +## v14.1.0 + +### New Features + +- Added `azure.SetEnvironment()` that will update the global environments map with the specified values. + +## v14.0.1 + +### Bug Fixes + +- Fix race condition when refreshing token. +- Fixed some tests to work with Go 1.14. + +## v14.0.0 + +## Breaking Changes + +- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`. + +## New Features + +- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap. + +## v13.4.0 + +## New Features + +- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client. +- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators. + +## v13.3.3 + +### Bug Fixes + +- Fixed connection leak when retrying requests. +- Enabled exponential back-off with a 2-minute cap when retrying on 429. +- Fixed some cases where errors were inadvertently dropped. + +## v13.3.2 + +### Bug Fixes + +- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation. + +## v13.3.1 + +- Updated external dependencies. + +### Bug Fixes + +## v13.3.0 + +### New Features + +- Added support for shared key and shared access signature token authorization. + - `autorest.NewSharedKeyAuthorizer()` and dependent types. + - `autorest.NewSASTokenAuthorizer()` and dependent types. +- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired. + +### Bug Fixes + +- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set. +- Support parsing error messages in XML responses. + +## v13.2.0 + +### New Features + +- Added the following functions to replace their versions that don't take a context. + - `adal.InitiateDeviceAuthWithContext()` + - `adal.CheckForUserCompletionWithContext()` + - `adal.WaitForUserCompletionWithContext()` + +## v13.1.0 + +### New Features + +- Added support for MSI authentication on Azure App Service and Azure Functions. + +## v13.0.2 + +### Bug Fixes + +- Always retry a request even if the sender returns a non-nil error. + +## v13.0.1 + +## Bug Fixes + +- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters. 
+
+## v13.0.0
+
+## Breaking Changes
+
+The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice.
+What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED`
+environment variable will have no effect. To enable this previous behavior you must now add the following import to your source file.
+```go
+  import _ "github.com/Azure/go-autorest/tracing/opencensus"
+```
+The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added.
+The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package).
+- tracing.Transport
+- tracing.Enable()
+- tracing.EnableWithAIForwarding()
+- tracing.Disable()
+
+The following APIs and types have been added
+- tracing.Tracer
+- tracing.Register()
+
+To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface.
+
+## v12.4.3
+
+### Bug Fixes
+
+- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens.
+
+## v12.4.2
+
+### Bug Fixes
+
+- Improvements to the fixes made in v12.4.1.
+  - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency.
+  - Switched to latest version of `ocagent` that still depends on protobuf v1.2.
+  - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum.
+
+## v12.4.1
+
+### Bug Fixes
+
+- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes.
+- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent.
+
+## v12.4.0
+
+### New Features
+
+- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context.
+
+## v12.3.0
+
+### New Features
+
+- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with
+  secret scenario; this basically bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding
+  MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request.
+  The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS
+  is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer.
+  See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer`
+  along with their supporting types and methods.
+- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context.
+- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries.
+
+## v12.2.0
+
+### New Features
+
+- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators.
+- Added `autorest.ByUnmarshallingBytes` response decorator.
+- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types.
+ +### Bug Fixes + +- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. + +## v12.1.0 + +### New Features + +- Added `to.ByteSlicePtr()`. +- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. + +## v12.0.0 + +### Breaking Changes + +In preparation for modules the following deprecated content has been removed. + + - async.NewFuture() + - async.Future.Done() + - async.Future.WaitForCompletion() + - async.DoPollForAsynchronous() + - The `utils` package + - validation.NewErrorWithValidationError() + - The `version` package + +## v11.9.0 + +### New Features + +- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. + +## v11.8.0 + +### New Features + +- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. + +## v11.7.1 + +### Bug Fixes + +- Fix missing support for http(s) proxy when using the default sender. + +## v11.7.0 + +### New Features + +- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. + +## v11.6.1 + +### Bug Fixes + +- Fix ACR DNS endpoint for government clouds. +- Add Cosmos DB DNS endpoints. +- Update dependencies to resolve build breaks in OpenCensus. + +## v11.6.0 + +### New Features + +- Added type `autorest.BasicAuthorizer` to support Basic authentication. + +## v11.5.2 + +### Bug Fixes + +- Fixed `GetTokenFromCLI` did not work with zsh. + +## v11.5.1 + +### Bug Fixes + +- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. + +## v11.5.0 + +### New Features + +- The `auth` package has been refactored so that the environment and file settings are now available. +- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created. +- Added support for certificate authorization for file-based config. + +## v11.4.0 + +### New Features + +- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. +- Exported `adal.UserAgent()` for parity with `autorest.Client`. + +## v11.3.2 + +### Bug Fixes + +- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. + +## v11.3.1 + +### Bug Fixes + +- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. + +## v11.3.0 + +### New Features + +- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. + +## v11.2.8 + +### Bug Fixes + +- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. + +## v11.2.7 + +### Bug Fixes + +- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. + Note that for backward compatibility reasons, both will work until the next major version release of the package. + +## v11.2.6 + +### Bug Fixes + +- If zero bytes are read from a polling response body don't attempt to unmarshal them. + +## v11.2.5 + +### Bug Fixes + +- Removed race condition in `autorest.DoRetryForStatusCodes`. + +## v11.2.4 + +### Bug Fixes + +- Function `cli.ProfilePath` now respects environment `AZURE_CONFIG_DIR` if available. + +## v11.2.1 + +NOTE: Versions of Go prior to 1.10 have been removed from CI as they no +longer work with golint. 
+
+### Bug Fixes
+
+- Method `MSIConfig.Authorizer` now supports user-assigned identities.
+- The adal package now reports its own user-agent string.
+
+## v11.2.0
+
+### New Features
+
+- Added `tracing` package that enables instrumentation of HTTP and API calls.
+  Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
+  will start instrumenting the code for metrics and traces.
+  Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
+  calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
+  App Insights Local Forwarder that needs to be running. Note that if the
+  AI Local Forwarder is not running, tracing will still be enabled.
+  By default, instrumentation is disabled. Once enabled, instrumentation can also
+  be programmatically disabled by calling `Disable`.
+- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
+
+### Bug Fixes
+
+- Don't use the initial request's context for LRO polling.
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
+  it is already set.
+
+## v11.1.1
+
+### Bug Fixes
+
+- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
+
+## v11.1.0
+
+### New Features
+
+- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure 2.0 CLI.
+- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
+
+## v11.0.1
+
+### New Features
+
+- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
+
+## v11.0.0
+
+### Breaking Changes
+
+- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number`
+  - ExpiresIn
+  - ExpiresOn
+  - NotBefore
+
+### New Features
+
+- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
+- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration.
+
+## v10.15.5
+
+### Bug Fixes
+
+- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response.
+
+## v10.15.4
+
+### Bug Fixes
+
+- If a polling operation returns a failure status code return the associated error.
+
+## v10.15.3
+
+### Bug Fixes
+
+- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header.
+
+## v10.15.2
+
+### Bug Fixes
+
+- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers.
+
+## v10.15.1
+
+### Bug Fixes
+
+- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
+- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
+
+## v10.15.0
+
+### New Features
+
+- Add initial support for request/response logging via setting environment variables.
+  Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
+  without their bodies. To include the bodies set the log level to `LogDebug`.
+  By default the logger writes to stderr, however it can also write to stdout or a file
+  if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
+  already exists it will be truncated.
+  IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
+  headers. Any other secrets will _not_ be redacted.
+
+## v10.14.0
+
+### New Features
+
+- Added package version that contains version constants and user-agent data.
+
+### Bug Fixes
+
+- Add the user-agent to token requests.
+
+## v10.13.0
+
+- Added support for additionalInfo in ServiceError type.
+
+## v10.12.0
+
+### New Features
+
+- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token.
+
+## v10.11.4
+
+### Bug Fixes
+
+- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult().
+- If there is no "final GET URL" return an error from Future.GetResult().
+
+## v10.11.3
+
+### Bug Fixes
+
+- In IMDS retry logic, if we don't receive a response don't retry.
+  - Renamed the retry function so it's clear it's meant for IMDS only.
+- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost.
+  - Also add the raw HTTP response to the DetailedResponse.
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration().
+
+## v10.11.2
+
+### Bug Fixes
+
+- Validation for integers handles int and int64 types.
+
+## v10.11.1
+
+### Bug Fixes
+
+- Adding User information to authorization config as parsed from CLI cache.
+
+## v10.11.0
+
+### New Features
+
+- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret
+- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token
+
+## v10.10.0
+
+### New Features
+
+- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported).
+- Added method ServicePrincipalToken.SetRefreshCallbacks().
+
+## v10.9.2
+
+### Bug Fixes
+
+- Refreshing a refresh token obtained from a web app authorization code now works.
+
+## v10.9.1
+
+### Bug Fixes
+
+- The retry logic for MSI token requests now uses exponential backoff per the guidelines.
+- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface.
+
+## v10.9.0
+
+### Deprecated Methods
+
+|                 Old Method |          New Method           |
+| -------------------------: | :---------------------------: |
+|          azure.NewFuture() | azure.NewFutureFromResponse() |
+| Future.WaitForCompletion() | Future.WaitForCompletionRef() |
+
+### New Features
+
+- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation.
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation.
+
+### Bug Fixes
+
+- Some futures failed to return their results; this should now be fixed.
+
+## v10.8.2
+
+### Bug Fixes
+
+- Add nil-guard to token retry logic.
+
+## v10.8.1
+
+### Bug Fixes
+
+- Return a TokenRefreshError if the sender fails on the initial request.
+- Don't retry on non-temporary network errors.
+
+## v10.8.0
+
+- Added NewAuthorizerFromEnvironmentWithResource() helper function.
+
+## v10.7.0
+
+### New Features
+
+- Added \*WithContext() methods to ADAL token refresh operations.
+
+## v10.6.2
+
+- Fixed a bug on device authentication.
+
+## v10.6.1
+
+- Added retries to MSI token get request.
+
+## v10.6.0
+
+- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
+ +## v10.5.1 + +### Bug Fixes + +- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. + +## v10.5.0 + +### New Features + +- Added NewPollingRequestWithContext() for use with polling asynchronous operations. + +### Bug Fixes + +- Make retry logic use the request's context instead of the deprecated Cancel object. + +## v10.4.0 + +### New Features + +- Added helper for parsing Azure Resource ID's. +- Added deprecation message to utils.GetEnvVarOrExit() + +## v10.3.0 + +### New Features + +- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints +- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint + +## v10.2.0 + +### New Features + +- Added endpoints for batch management. + +## v10.1.3 + +### Bug Fixes + +- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). +- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. + +## v10.1.2 + +- Corrected comment for auth.NewAuthorizerFromFile() function. + +## v10.1.1 + +- Updated version number to match current release. + +## v10.1.0 + +### New Features + +- Expose the polling URL for futures. + +### Bug Fixes + +- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). + +## v10.0.0 + +### New Features + +- Added target and innererror fields to ServiceError to comply with OData v4 spec. +- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). +- Added helper methods for obtaining authorizers. +- Expose the polling URL for futures. + +### Bug Fixes + +- Switched from glide to dep for dependency management. +- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. +- Fixed a race condition in token refresh. + +### Breaking Changes + +- The ServiceError.Details field type has been changed to match the OData v4 spec. +- Go v1.7 has been dropped from CI. +- API parameter validation failures will now return a unique error type validation.Error. +- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). + +## v9.10.0 + +- Fix the Service Bus suffix in Azure public env +- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) + +## v9.9.0 + +### New Features + +- Added EventGridKeyAuthorizer for key authorization with event grid topics. + +### Bug Fixes + +- Fixed race condition when auto-refreshing service principal tokens. + +## v9.8.1 + +### Bug Fixes + +- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. +- Updated runtime version info so it's current. + +## v9.8.0 + +### New Features + +- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. + +## v9.7.1 + +### Bug Fixes + +- Use correct AAD and Graph endpoints for US Gov environment. + +## v9.7.0 + +### New Features + +- Added support for application/octet-stream MIME types. 
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure Authorization header is added to request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters.
+- Set correct Content Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures look at the error type not the status code as it could vary.
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this
+  environment variable is not set, it will fall back to the default path set by Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI Authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Adding missing Apache Headers
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI Endpoint Support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Added support for bearer authentication callbacks.
+- Support 429 response codes that include a "Retry-After" header.
+- Support the validation constraint "Pattern" for map keys.
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go.
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody(), added in Go 1.8.
+
+## v8.1.0
+
+Adds the RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version testing now removed from production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire response body or a
+  trailing portion of it. In doing so, Go's http library can reuse HTTP
+  connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes the UTF-8 BOM if present in the response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixed a bug in calls to `DelayForBackoff` that caused doubling of the delay
+  duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- Fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformatted validation errors for better error messages.
+
+## v7.1.0
+
+- preparer: Added support for multipart form data - WithMultiPartFormdata()
+- preparer: Added support for sending a file in the request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for the Azure Go SDK.
+
+## v7.0.7
+
+- Add a trailing / to the endpoint.
+- azure: add EnvironmentFromName.
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change URL path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add a check to start polling only when the status is in [200, 201, 202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue when renewing a token in the device flow.
+- Store the Retry-After header for subsequent requests in polling.
+- Add attribute details to the service error.
+
+## v7.0.4
+
+- Better error messages for long-running operation failures.
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response.
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered.
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON.
+- Fixed polling support for GET calls.
+- Changed format name from TimeRfc1123 to TimeRFC1123.
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser.
+- Rewrote Azure asynchronous handling.
+- Reverted to only unmarshalling JSON.
+- Corrected handling of RFC3339 time strings and added support for the RFC 1123 time format.
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their own custom JSON serialization handlers, the code has been reverted to using
+`json.Unmarshal`. The original change to `json.Decoder` was made to reduce duplicate
+code; there is no loss of function, and there is a gain in accuracy, by reverting.
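+
+For illustration, a minimal sketch of the difference (the payload below is hypothetical):
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+func main() {
+	// A valid JSON value followed by trailing garbage.
+	payload := []byte(`{"name":"example"} trailing-garbage`)
+
+	var v struct{ Name string }
+
+	// json.Unmarshal scans the entire payload and reports the garbage.
+	fmt.Println(json.Unmarshal(payload, &v)) // non-nil error
+
+	// A single Decode call stops after the first value, so the
+	// trailing garbage goes unnoticed.
+	dec := json.NewDecoder(bytes.NewReader(payload))
+	fmt.Println(dec.Decode(&v)) // <nil>
+}
+```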
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values.
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead, new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators for unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancelation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package.
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes a `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as the base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any)
+
+## v2.0.0
+
+- Changed the `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificates and private keys
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs.
add +- Added JSON to error when unmarshalling fails +- Added Client#Send method +- Corrected case of "Azure" in package paths +- Added "to" helpers, Azure helpers, and improved ease-of-use +- Corrected golint issues + +## v1.0.1 + +- Added CHANGELOG.md + +## v1.1.0 + +- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT +- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate + +## v1.1.1 + +- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile new file mode 100644 index 0000000..a434e73 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/GNUmakefile @@ -0,0 +1,23 @@ +DIR?=./autorest/ + +default: build + +build: fmt + go install $(DIR) + +test: + go test $(DIR) || exit 1 + +vet: + @echo "go vet ." + @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock new file mode 100644 index 0000000..dc6e3e6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -0,0 +1,324 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", 
+ "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + "plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + 
"unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", + "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 0000000..1fc2865 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 0000000..b9d6a27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 0000000..de1e19a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,165 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. 
A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. + +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. 
While this enables distinguishing empty values from those to be unchanged, creating
+pointers to a base type (notably constant, in-line values) requires additional variables. This, for
+example,
+
+```go
+  s := struct {
+      S *string
+  }{ S: &"foo" }
+```
+fails, while this
+
+```go
+  v := "foo"
+  s := struct {
+      S *string
+  }{ S: &v }
+```
+succeeds.
+
+To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
+Go base types that have Swagger analogs. It also provides a helper that converts between
+`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
+associated with a key should be cleared. With the helpers, the previous example becomes
+
+```go
+  s := struct {
+      S *string
+  }{ S: to.StringPtr("foo") }
+```
+
+## Install
+
+```bash
+go get github.com/Azure/go-autorest/autorest
+go get github.com/Azure/go-autorest/autorest/azure
+go get github.com/Azure/go-autorest/autorest/date
+go get github.com/Azure/go-autorest/autorest/to
+```
+
+### Using with Go Modules
+In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
+
+- autorest/adal
+- autorest/azure/auth
+- autorest/azure/cli
+- autorest/date
+- autorest/mocks
+- autorest/to
+- autorest/validation
+- autorest
+- logger
+- tracing
+
+Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
+
+## License
+
+See LICENSE file.
+
+-----
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/LICENSE
new file mode 100644
index 0000000..b9d6a27
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 0000000..b9d6a27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 0000000..fec416a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,292 @@ +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). + +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. + +## Install + +```bash +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. 
Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+    // This is called after the token is acquired
+    return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+    *oauthConfig,
+    applicationID,
+    applicationSecret,
+    resource,
+    callback)
+if err != nil {
+    return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+    // ... use the token ...
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+    return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from the pfx file
+certificate, rsaPrivateKey, err := adal.DecodePfxCertificateData(certData, "")
+if err != nil {
+    return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+    *oauthConfig,
+    applicationID,
+    certificate,
+    rsaPrivateKey,
+    resource,
+    callback)
+if err != nil {
+    return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+    // ... use the token ...
+}
+```
+
+* Update the certificate path to point to the `example-app.pfx` file created in the previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+    oauthClient,
+    *oauthConfig,
+    applicationID,
+    resource)
+if err != nil {
+    return nil, fmt.Errorf("failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+    return nil, fmt.Errorf("failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+    *oauthConfig,
+    applicationID,
+    resource,
+    *token,
+    callback)
+
+if err == nil {
+    token := spt.Token
+    // ... use the token ...
+}
+```
+
+#### Username and password authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+    *oauthConfig,
+    applicationID,
+    username,
+    password,
+    resource,
+    callback)
+
+if err == nil {
+    token := spt.Token
+    // ... use the token ...
+}
+```
+
+#### Authorization code authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+    *oauthConfig,
+    applicationID,
+    clientSecret,
+    authorizationCode,
+    redirectURI,
+    resource,
+    callback)
+
+err = spt.Refresh()
+if err == nil {
+    token := spt.Token
+    // ... use the token ...
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pkcs12/PFX application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 0000000..fa59647
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "errors"
+    "fmt"
+    "net/url"
+)
+
+const (
+    activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+    AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
+    AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
+    TokenEndpoint      url.URL `json:"tokenEndpoint"`
+    DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+    return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+    if len(param) == 0 {
+        return fmt.Errorf("parameter '%s' cannot be empty", name)
+    }
+    return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+    apiVer := "1.0"
+    return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs.
+// If apiVersion is not nil, the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
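+// For example (a sketch; the endpoint and tenant are placeholders), passing "1.0"
+// produces a token endpoint of the form
+// "https://login.microsoftonline.com/TENANT_ID/oauth2/token?api-version=1.0":
+//
+//	ver := "1.0"
+//	cfg, err := adal.NewOAuthConfigWithAPIVersion("https://login.microsoftonline.com/", "TENANT_ID", &ver)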
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+    if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+        return nil, err
+    }
+    api := ""
+    // it's legal for tenantID to be empty so don't validate it
+    if apiVersion != nil {
+        if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+            return nil, err
+        }
+        api = fmt.Sprintf("?api-version=%s", *apiVersion)
+    }
+    u, err := url.Parse(activeDirectoryEndpoint)
+    if err != nil {
+        return nil, err
+    }
+    authorityURL, err := u.Parse(tenantID)
+    if err != nil {
+        return nil, err
+    }
+    authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+    if err != nil {
+        return nil, err
+    }
+    tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+    if err != nil {
+        return nil, err
+    }
+    deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+    if err != nil {
+        return nil, err
+    }
+
+    return &OAuthConfig{
+        AuthorityEndpoint:  *authorityURL,
+        AuthorizeEndpoint:  *authorizeURL,
+        TokenEndpoint:      *tokenURL,
+        DeviceCodeEndpoint: *deviceCodeURL,
+    }, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+    PrimaryTenant() *OAuthConfig
+    AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+    APIVersion string
+}
+
+// apiVersion returns the bare API version string; NewOAuthConfigWithAPIVersion
+// adds the "?api-version=" prefix itself, so it must not be pre-wrapped here.
+func (c OAuthOptions) apiVersion() string {
+    if c.APIVersion != "" {
+        return c.APIVersion
+    }
+    return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
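+//
+// A minimal sketch (the tenant IDs are placeholders; between one and three
+// auxiliary tenants are required):
+//
+//	cfg, err := adal.NewMultiTenantOAuthConfig(
+//		"https://login.microsoftonline.com/",
+//		"PRIMARY_TENANT_ID",
+//		[]string{"AUX_TENANT_ID"},
+//		adal.OAuthOptions{})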
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 0000000..9daa4b5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,273 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+/*
+   This file is largely based on rjw57/oauth2device's code, with the following differences:
+    * scope -> resource, and only allow a single one
+    * receive "Message" in the DeviceCode struct and show it to users as the prompt
+    * azure-xplat-cli has the following behavior that this emulates:
+      - does not send client_secret during the token exchange
+      - sends resource again in the token exchange request
+*/
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+    "net/url"
+    "strings"
+    "time"
+)
+
+const (
+    logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+    // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+    ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+    // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+    ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+    // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+    ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+    // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+    ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+    // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+    ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+    // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+    ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+    // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+    ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+    errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
+    errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
+    errTokenSendingFails  = "Error occurred while sending request with device code for a token"
+    errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+    errStatusNotOK        = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint
+// It contains information to instruct the user to complete the auth flow
+type DeviceCode struct {
+    DeviceCode      *string `json:"device_code,omitempty"`
+    UserCode        *string `json:"user_code,omitempty"`
+    VerificationURL *string `json:"verification_url,omitempty"`
+    ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+    Interval        *int64  `json:"interval,string,omitempty"`
+
+    Message *string `json:"message"` // Azure specific
+
+    // The following fields are stored when the flow is initiated and reused
+    // when exchanging the device code for a token.
+    Resource    string
+    OAuthConfig OAuthConfig
+    ClientID    string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+    Error            *string `json:"error,omitempty"`
+    ErrorCodes       []int   `json:"error_codes,omitempty"`
+    ErrorDescription *string `json:"error_description,omitempty"`
+    Timestamp        *string `json:"timestamp,omitempty"`
+    TraceID          *string `json:"trace_id,omitempty"`
+}
+
+// deviceToken is the object returned by the token exchange endpoint.
+// It can either look like a Token or a TokenError, so put both here
+// and check for the presence of "Error" to know if we are in an error state
+type deviceToken struct {
+    Token
+    TokenError
+}
+
+// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+// Deprecated: use InitiateDeviceAuthWithContext() instead.
+func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+    return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
+}
+
+// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+    v := url.Values{
+        "client_id": []string{clientID},
+        "resource":  []string{resource},
+    }
+
+    s := v.Encode()
+    body := ioutil.NopCloser(strings.NewReader(s))
+
+    req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+    }
+
+    req.ContentLength = int64(len(s))
+    req.Header.Set(contentType, mimeTypeFormPost)
+    resp, err := sender.Do(req.WithContext(ctx))
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+    }
+    defer resp.Body.Close()
+
+    rb, err := ioutil.ReadAll(resp.Body)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+    }
+
+    if resp.StatusCode != http.StatusOK {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
+    }
+
+    if len(strings.Trim(string(rb), " ")) == 0 {
+        return nil, ErrDeviceCodeEmpty
+    }
+
+    var code DeviceCode
+    err = json.Unmarshal(rb, &code)
+    if err != nil {
+        return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+    }
+
+    code.ClientID = clientID
+    code.Resource = resource
+    code.OAuthConfig = oauthConfig
+
+    return &code, nil
+}
+
+// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed
+// Deprecated: use CheckForUserCompletionWithContext() instead.
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return CheckForUserCompletionWithContext(context.Background(), sender, code) +} + +// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + // return a more meaningful error message if available + if token.ErrorDescription != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) + } + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +// Deprecated: use WaitForUserCompletionWithContext() instead. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return WaitForUserCompletionWithContext(context.Background(), sender, code) +} + +// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error +// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
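+//
+// A typical flow (a sketch; the client and identifiers are placeholders):
+//
+//	code, err := adal.InitiateDeviceAuthWithContext(ctx, client, *oauthConfig, clientID, resource)
+//	// display *code.Message to the user, then block until they sign in:
+//	token, err := adal.WaitForUserCompletionWithContext(ctx, client, code)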
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 0000000..8c5d36c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,13 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/date v0.3.0 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/logger v0.2.1 + github.com/Azure/go-autorest/tracing v0.6.0 + github.com/form3tech-oss/jwt-go v3.2.2+incompatible + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 0000000..5ee68e7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,21 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 0000000..7551b79 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 0000000..2a974a3 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,135 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "golang.org/x/crypto/pkcs12" +) + +var ( + // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. + ErrMissingCertificate = errors.New("adal: certificate missing") + + // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. + ErrMissingPrivateKey = errors.New("adal: private key missing") +) + +// LoadToken restores a Token object from a file located at 'path'. 
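+// Together with SaveToken it can serve as a simple on-disk token cache
+// (a sketch; the path is an arbitrary example):
+//
+//	if tok, err := adal.LoadToken("/tmp/adal/token.json"); err == nil && !tok.IsExpired() {
+//		// reuse tok instead of acquiring a new one
+//	}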
+func LoadToken(path string) (*Token, error) {
+    file, err := os.Open(path)
+    if err != nil {
+        return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+    }
+    defer file.Close()
+
+    var token Token
+
+    dec := json.NewDecoder(file)
+    if err = dec.Decode(&token); err != nil {
+        return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+    }
+    return &token, nil
+}
+
+// SaveToken persists an OAuth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+    dir := filepath.Dir(path)
+    err := os.MkdirAll(dir, os.ModePerm)
+    if err != nil {
+        return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+    }
+
+    newFile, err := ioutil.TempFile(dir, "token")
+    if err != nil {
+        return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+    }
+    tempPath := newFile.Name()
+
+    if err := json.NewEncoder(newFile).Encode(token); err != nil {
+        return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+    }
+    if err := newFile.Close(); err != nil {
+        return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+    }
+
+    // Atomic replace to avoid multi-writer file corruptions
+    if err := os.Rename(tempPath, path); err != nil {
+        return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+    }
+    if err := os.Chmod(path, mode); err != nil {
+        return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+    }
+    return nil
+}
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key or an error is returned.
+// If the private key is not password protected, pass the empty string for password.
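+//
+// A sketch of typical use, feeding the result to NewServicePrincipalTokenFromCertificate
+// (the path is an example):
+//
+//	pfxData, err := ioutil.ReadFile("example-app.pfx")
+//	cert, key, err := adal.DecodePfxCertificateData(pfxData, "")
+//	spt, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, clientID, cert, key, resource)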
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+    blocks, err := pkcs12.ToPEM(pfxData, password)
+    if err != nil {
+        return nil, nil, err
+    }
+    // first extract the private key
+    var priv *rsa.PrivateKey
+    for _, block := range blocks {
+        if block.Type == "PRIVATE KEY" {
+            priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+            if err != nil {
+                return nil, nil, err
+            }
+            break
+        }
+    }
+    if priv == nil {
+        return nil, nil, ErrMissingPrivateKey
+    }
+    // now find the certificate with the matching public key of our private key
+    var cert *x509.Certificate
+    for _, block := range blocks {
+        if block.Type == "CERTIFICATE" {
+            pcert, err := x509.ParseCertificate(block.Bytes)
+            if err != nil {
+                return nil, nil, err
+            }
+            certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+            if !ok {
+                // keep looking
+                continue
+            }
+            if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+                // found a match
+                cert = pcert
+                break
+            }
+        }
+    }
+    if cert == nil {
+        return nil, nil, ErrMissingCertificate
+    }
+    return cert, priv, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 0000000..1826a68
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,96 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "crypto/tls"
+    "net/http"
+    "net/http/cookiejar"
+    "sync"
+
+    "github.com/Azure/go-autorest/tracing"
+)
+
+const (
+    contentType      = "Content-Type"
+    mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// DO NOT ACCESS THIS DIRECTLY. Go through sender().
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+    Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a function type that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+    return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+    return DecorateSender(sender(), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender.
+// Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+    for _, decorate := range decorators {
+        s = decorate(s)
+    }
+    return s
+}
+
+func sender() Sender {
+    // note that we can't init defaultSender in init() since it will
+    // execute before calling code has had a chance to enable tracing
+    defaultSenderInit.Do(func() {
+        // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+        defaultTransport := http.DefaultTransport.(*http.Transport)
+        transport := &http.Transport{
+            Proxy:                 defaultTransport.Proxy,
+            DialContext:           defaultTransport.DialContext,
+            MaxIdleConns:          defaultTransport.MaxIdleConns,
+            IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+            TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+            ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+            TLSClientConfig: &tls.Config{
+                MinVersion: tls.VersionTLS12,
+            },
+        }
+        var roundTripper http.RoundTripper = transport
+        if tracing.IsEnabled() {
+            roundTripper = tracing.NewTransport(transport)
+        }
+        j, _ := cookiejar.New(nil)
+        defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+    })
+    return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 0000000..c870ef4
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1336 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "context"
+    "crypto/rand"
+    "crypto/rsa"
+    "crypto/sha1"
+    "crypto/x509"
+    "encoding/base64"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "math"
+    "net/http"
+    "net/url"
+    "os"
+    "strconv"
+    "strings"
+    "sync"
+    "time"
+
+    "github.com/Azure/go-autorest/autorest/date"
+    "github.com/Azure/go-autorest/logger"
+    "github.com/form3tech-oss/jwt-go"
+)
+
+const (
+    defaultRefresh = 5 * time.Minute
+
+    // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+    OAuthGrantTypeDeviceCode = "device_code"
+
+    // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+    OAuthGrantTypeClientCredentials = "client_credentials"
+
+    // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+    OAuthGrantTypeUserPass = "password"
+
+    // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+    OAuthGrantTypeRefreshToken = "refresh_token"
+
+    // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+    OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+    // metadataHeader is the header required by MSI extension
+    metadataHeader = "Metadata"
+
+    // msiEndpoint is the well known endpoint for getting MSI authentication tokens
+    msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+    // the API version to use for the MSI endpoint
+    msiAPIVersion = "2018-02-01"
+
+    // the default number of attempts to refresh an MSI authentication token
+    defaultMaxMSIRefreshAttempts = 5
+
+    // msiEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+    msiEndpointEnv = "MSI_ENDPOINT"
+
+    // msiSecretEnv is the environment variable used to store the request secret on App Service and Functions
+    msiSecretEnv = "MSI_SECRET"
+
+    // the API version to use for the legacy App Service MSI endpoint
+    appServiceAPIVersion2017 = "2017-09-01"
+
+    // secretHeader is the header used when authenticating against the App Service MSI endpoint
+    secretHeader = "Secret"
+
+    // the format for expires_on in UTC with AM/PM
+    expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00"
+
+    // the format for expires_on in UTC without AM/PM
+    expiresOnDateFormat = "1/2/2006 15:04:05 +00:00"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+    OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+    PrimaryOAuthToken() string
+    AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
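+// Callers can type-assert a refresh error to inspect the failed HTTP response
+// (a sketch):
+//
+//	if err := spt.Refresh(); err != nil {
+//		if tre, ok := err.(adal.TokenRefreshError); ok && tre.Response() != nil {
+//			fmt.Println("refresh failed with status", tre.Response().StatusCode)
+//		}
+//	}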
+type TokenRefreshError interface {
+    error
+    Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+    Refresh() error
+    RefreshExchange(resource string) error
+    EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality that accepts a context
+type RefresherWithContext interface {
+    RefreshWithContext(ctx context.Context) error
+    RefreshExchangeWithContext(ctx context.Context, resource string) error
+    EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// TokenRefresh is a type representing a custom callback to refresh a token
+type TokenRefresh func(ctx context.Context, resource string) (*Token, error)
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+    AccessToken  string `json:"access_token"`
+    RefreshToken string `json:"refresh_token"`
+
+    ExpiresIn json.Number `json:"expires_in"`
+    ExpiresOn json.Number `json:"expires_on"`
+    NotBefore json.Number `json:"not_before"`
+
+    Resource string `json:"resource"`
+    Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+    return Token{
+        ExpiresIn: "0",
+        ExpiresOn: "0",
+        NotBefore: "0",
+    }
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+    return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+    s, err := t.ExpiresOn.Float64()
+    if err != nil {
+        s = -3600
+    }
+
+    expiration := date.NewUnixTimeFromSeconds(s)
+
+    return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+    return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+    return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+    return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an OAuth token.
+type ServicePrincipalSecret interface {
+    SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. It is used by tokens
+// created manually (see NewServicePrincipalTokenFromManualToken).
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type.
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+    return fmt.Errorf("manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(24 * time.Hour).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { + msiType msiType + clientResourceID string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+    return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) {
+    return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported")
+}
+
+// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth.
+type ServicePrincipalUsernamePasswordSecret struct {
+    Username string `json:"username"`
+    Password string `json:"password"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+    v.Set("username", secret.Username)
+    v.Set("password", secret.Password)
+    return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) {
+    type tokenType struct {
+        Type     string `json:"type"`
+        Username string `json:"username"`
+        Password string `json:"password"`
+    }
+    return json.Marshal(tokenType{
+        Type:     "ServicePrincipalUsernamePasswordSecret",
+        Username: secret.Username,
+        Password: secret.Password,
+    })
+}
+
+// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth.
+type ServicePrincipalAuthorizationCodeSecret struct {
+    ClientSecret      string `json:"value"`
+    AuthorizationCode string `json:"authCode"`
+    RedirectURI       string `json:"redirect"`
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+    v.Set("code", secret.AuthorizationCode)
+    v.Set("client_secret", secret.ClientSecret)
+    v.Set("redirect_uri", secret.RedirectURI)
+    return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) {
+    type tokenType struct {
+        Type     string `json:"type"`
+        Value    string `json:"value"`
+        AuthCode string `json:"authCode"`
+        Redirect string `json:"redirect"`
+    }
+    return json.Marshal(tokenType{
+        Type:     "ServicePrincipalAuthorizationCodeSecret",
+        Value:    secret.ClientSecret,
+        AuthCode: secret.AuthorizationCode,
+        Redirect: secret.RedirectURI,
+    })
+}
+
+// ServicePrincipalToken encapsulates a Token created for a Service Principal.
+type ServicePrincipalToken struct {
+    inner             servicePrincipalToken
+    refreshLock       *sync.RWMutex
+    sender            Sender
+    customRefreshFunc TokenRefresh
+    refreshCallbacks  []TokenRefreshCallback
+    // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+    // Setting this to a value less than 1 will use the default value.
+    MaxMSIRefreshAttempts int
+}
+
+// MarshalTokenJSON returns the marshalled inner token.
+func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
+    return json.Marshal(spt.inner.Token)
+}
+
+// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
+func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
+    spt.refreshCallbacks = callbacks
+}
+
+// SetCustomRefreshFunc sets a custom refresh function used to refresh the token.
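+// A sketch of a custom refresh that sources tokens externally (fetchToken is a
+// hypothetical helper):
+//
+//	spt.SetCustomRefreshFunc(func(ctx context.Context, resource string) (*adal.Token, error) {
+//		return fetchToken(ctx, resource) // hypothetical external token source
+//	})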
+func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) {
+    spt.customRefreshFunc = customRefreshFunc
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
+    return json.Marshal(spt.inner)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+    // need to determine the token type
+    raw := map[string]interface{}{}
+    err := json.Unmarshal(data, &raw)
+    if err != nil {
+        return err
+    }
+    secret, ok := raw["secret"].(map[string]interface{})
+    if !ok {
+        return errors.New("the token does not contain a 'secret' object")
+    }
+    switch secret["type"] {
+    case "ServicePrincipalNoSecret":
+        spt.inner.Secret = &ServicePrincipalNoSecret{}
+    case "ServicePrincipalTokenSecret":
+        spt.inner.Secret = &ServicePrincipalTokenSecret{}
+    case "ServicePrincipalCertificateSecret":
+        return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+    case "ServicePrincipalMSISecret":
+        return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+    case "ServicePrincipalUsernamePasswordSecret":
+        spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+    case "ServicePrincipalAuthorizationCodeSecret":
+        spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+    default:
+        return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+    }
+    err = json.Unmarshal(data, &spt.inner)
+    if err != nil {
+        return err
+    }
+    // Don't override the refreshLock or the sender if those have been already set.
+    if spt.refreshLock == nil {
+        spt.refreshLock = &sync.RWMutex{}
+    }
+    if spt.sender == nil {
+        spt.sender = sender()
+    }
+    return nil
+}
+
+// servicePrincipalToken is an internal type used for marshalling/unmarshalling.
+type servicePrincipalToken struct {
+    Token         Token                  `json:"token"`
+    Secret        ServicePrincipalSecret `json:"secret"`
+    OauthConfig   OAuthConfig            `json:"oauth"`
+    ClientID      string                 `json:"clientID"`
+    Resource      string                 `json:"resource"`
+    AutoRefresh   bool                   `json:"autoRefresh"`
+    RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+    if oac.IsZero() {
+        return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+    }
+    return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
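+//
+// Sketch (the IDs and secret are placeholders):
+//
+//	spt, err := adal.NewServicePrincipalToken(*oauthConfig, "CLIENT_ID", "CLIENT_SECRET",
+//		"https://management.core.windows.net/")
+//	if err == nil {
+//		err = spt.Refresh() // acquire the initial token
+//	}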
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(secret, "secret"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalTokenSecret{
+            ClientSecret: secret,
+        },
+        callbacks...,
+    )
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and RSA private key.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+    if certificate == nil {
+        return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+    }
+    if privateKey == nil {
+        return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+    }
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalCertificateSecret{
+            PrivateKey:  privateKey,
+            Certificate: certificate,
+        },
+        callbacks...,
+    )
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(username, "username"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(password, "password"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalUsernamePasswordSecret{
+            Username: username,
+            Password: password,
+        },
+        callbacks...,
+    )
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+    if err := validateOAuthConfig(oauthConfig); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientID, "clientID"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+        return nil, err
+    }
+    if err := validateStringParam(resource, "resource"); err != nil {
+        return nil, err
+    }
+
+    return NewServicePrincipalTokenWithSecret(
+        oauthConfig,
+        clientID,
+        resource,
+        &ServicePrincipalAuthorizationCodeSecret{
+            ClientSecret:      clientSecret,
+            AuthorizationCode: authorizationCode,
+            RedirectURI:       redirectURI,
+        },
+        callbacks...,
+    )
+}
+
+type msiType int
+
+const (
+    msiTypeUnavailable msiType = iota
+    msiTypeAppServiceV20170901
+    msiTypeCloudShell
+    msiTypeIMDS
+)
+
+func (m msiType) String() string {
+    switch m {
+    case msiTypeUnavailable:
+        return "unavailable"
+    case msiTypeAppServiceV20170901:
+        return "AppServiceV20170901"
+    case msiTypeCloudShell:
+        return "CloudShell"
+    case msiTypeIMDS:
+        return "IMDS"
+    default:
+        return fmt.Sprintf("unhandled MSI type %d", m)
+    }
+}
+
+// getMSIType returns the MSI type and endpoint, or an error.
+func getMSIType() (msiType, string, error) {
+    if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
+        // if the env var MSI_ENDPOINT is set
+        if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
+            // if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
+            return msiTypeAppServiceV20170901, endpointEnvVar, nil
+        }
+        // if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
+        return msiTypeCloudShell, endpointEnvVar, nil
+    } else if msiAvailableHook(context.Background(), sender()) {
+        // if MSI_ENDPOINT is NOT set AND the IMDS endpoint is available the msiType is IMDS. This will timeout after 500 milliseconds
+        return msiTypeIMDS, msiEndpoint, nil
+    } else {
+        // if MSI_ENDPOINT is NOT set and the IMDS endpoint is not available, Managed Identity is not available
+        return msiTypeUnavailable, "", errors.New("MSI not available")
+    }
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+// NOTE: this always returns the IMDS endpoint, it does not work for app services or cloud shell. +// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. +func GetMSIVMEndpoint() (string, error) { + return msiEndpoint, nil +} + +// GetMSIAppServiceEndpoint get the MSI endpoint for App Service and Functions. +// It will return an error when not running in an app service/functions environment. +// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. +func GetMSIAppServiceEndpoint() (string, error) { + msiType, endpoint, err := getMSIType() + if err != nil { + return "", err + } + switch msiType { + case msiTypeAppServiceV20170901: + return endpoint, nil + default: + return "", fmt.Errorf("%s is not app service environment", msiType) + } +} + +// GetMSIEndpoint get the appropriate MSI endpoint depending on the runtime environment +// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint. +func GetMSIEndpoint() (string, error) { + _, endpoint, err := getMSIType() + return endpoint, err +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the system assigned identity when creating the token. +// msiEndpoint - empty string, or pass a non-empty string to override the default value. +// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the clientID of specified user assigned identity when creating the token. +// msiEndpoint - empty string, or pass a non-empty string to override the default value. +// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the azure resource id of user assigned identity when creating the token. +// msiEndpoint - empty string, or pass a non-empty string to override the default value. +// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. +func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil { + return nil, err + } + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...) +} + +// ManagedIdentityOptions contains optional values for configuring managed identity authentication. +type ManagedIdentityOptions struct { + // ClientID is the user-assigned identity to use during authentication. + // It is mutually exclusive with IdentityResourceID. 
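+ // For example (hypothetical placeholder GUID), used together with
+ // NewServicePrincipalTokenFromManagedIdentity below:
+ //
+ //	opts := adal.ManagedIdentityOptions{ClientID: "00000000-0000-0000-0000-000000000000"}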
+ ClientID string + + // IdentityResourceID is the resource ID of the user-assigned identity to use during authentication. + // It is mutually exclusive with ClientID. + IdentityResourceID string +} + +// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity. +// It supports the following managed identity environments. +// - App Service Environment (API version 2017-09-01 only) +// - Cloud shell +// - IMDS with a system or user assigned identity +func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if options == nil { + options = &ManagedIdentityOptions{} + } + return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...) +} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != "" && identityResourceID != "" { + return nil, errors.New("cannot specify userAssignedID and identityResourceID") + } + msiType, endpoint, err := getMSIType() + if err != nil { + logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v", err) + return nil, err + } + logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s", msiType, endpoint) + if msiEndpoint != "" { + endpoint = msiEndpoint + logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s", endpoint) + } + msiEndpointURL, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + // cloud shell sends its data in the request body + if msiType != msiTypeCloudShell { + v := url.Values{} + v.Set("resource", resource) + clientIDParam := "client_id" + switch msiType { + case msiTypeAppServiceV20170901: + clientIDParam = "clientid" + v.Set("api-version", appServiceAPIVersion2017) + break + case msiTypeIMDS: + v.Set("api-version", msiAPIVersion) + } + if userAssignedID != "" { + v.Set(clientIDParam, userAssignedID) + } else if identityResourceID != "" { + v.Set("mi_res_id", identityResourceID) + } + msiEndpointURL.RawQuery = v.Encode() + } + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{ + msiType: msiType, + clientResourceID: identityResourceID, + }, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + ClientID: userAssignedID, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. 
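+// Callers can recover the failed response with a type assertion (sketch; err
+// is an error returned from a token refresh):
+//
+//	if tre, ok := err.(adal.TokenRefreshError); ok && tre.Response() != nil {
+//		fmt.Println("refresh failed with status", tre.Response().StatusCode)
+//	}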
+func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is safe for concurrent use. 
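+// For example (sketch; the resource URI is a placeholder):
+//
+//	err := spt.RefreshExchangeWithContext(ctx, "https://vault.azure.net")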
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+ spt.refreshLock.Lock()
+ defer spt.refreshLock.Unlock()
+ return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+ switch spt.inner.Secret.(type) {
+ case *ServicePrincipalUsernamePasswordSecret:
+ return OAuthGrantTypeUserPass
+ case *ServicePrincipalAuthorizationCodeSecret:
+ return OAuthGrantTypeAuthorizationCode
+ default:
+ return OAuthGrantTypeClientCredentials
+ }
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+ if spt.customRefreshFunc != nil {
+ token, err := spt.customRefreshFunc(ctx, resource)
+ if err != nil {
+ return err
+ }
+ spt.inner.Token = *token
+ return spt.InvokeRefreshCallbacks(spt.inner.Token)
+ }
+ req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
+ if err != nil {
+ return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+ }
+ req.Header.Add("User-Agent", UserAgent())
+ req = req.WithContext(ctx)
+ var resp *http.Response
+ authBodyFilter := func(b []byte) []byte {
+ if logger.Level() != logger.LogAuth {
+ return []byte("**REDACTED** authentication body")
+ }
+ return b
+ }
+ if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
+ switch msiSecret.msiType {
+ case msiTypeAppServiceV20170901:
+ req.Method = http.MethodGet
+ req.Header.Set("secret", os.Getenv(msiSecretEnv))
+ break
+ case msiTypeCloudShell:
+ req.Header.Set("Metadata", "true")
+ data := url.Values{}
+ data.Set("resource", spt.inner.Resource)
+ if spt.inner.ClientID != "" {
+ data.Set("client_id", spt.inner.ClientID)
+ } else if msiSecret.clientResourceID != "" {
+ data.Set("msi_res_id", msiSecret.clientResourceID)
+ }
+ req.Body = ioutil.NopCloser(strings.NewReader(data.Encode()))
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ break
+ case msiTypeIMDS:
+ req.Method = http.MethodGet
+ req.Header.Set("Metadata", "true")
+ break
+ }
+ logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
+ resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
+ } else {
+ v := url.Values{}
+ v.Set("client_id", spt.inner.ClientID)
+ v.Set("resource", resource)
+
+ if spt.inner.Token.RefreshToken != "" {
+ v.Set("grant_type", OAuthGrantTypeRefreshToken)
+ v.Set("refresh_token", spt.inner.Token.RefreshToken)
+ // web apps must specify client_secret when refreshing tokens
+ // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens
+ if spt.getGrantType() == OAuthGrantTypeAuthorizationCode {
+ err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ v.Set("grant_type", spt.getGrantType())
+ err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+ if err != nil {
+ return err
+ }
+ }
+
+ s := v.Encode()
+ body := ioutil.NopCloser(strings.NewReader(s))
+ req.ContentLength = int64(len(s))
+ req.Header.Set(contentType, mimeTypeFormPost)
+ req.Body = body
+ logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter})
+ resp, err = spt.sender.Do(req)
+ }
+
+ // don't return a TokenRefreshError here; this will allow retry logic to apply
+ if err != nil {
+ return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)
+ } else if resp == nil {
+ return fmt.Errorf("adal: received nil response and error")
+ }
+
+ logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter})
+ defer resp.Body.Close()
+ rb, err := ioutil.ReadAll(resp.Body)
+
+ if resp.StatusCode != http.StatusOK {
+ if err != nil {
+ return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp)
+ }
+ return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp)
+ }
+
+ // for the following error cases don't return a TokenRefreshError. the operation succeeded
+ // but some transient failure happened during deserialization. by returning a generic error
+ // the retry logic will kick in (we don't retry on TokenRefreshError).
+
+ if err != nil {
+ return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err)
+ }
+ if len(strings.Trim(string(rb), " ")) == 0 {
+ return fmt.Errorf("adal: Empty service principal token received during refresh")
+ }
+ token := struct {
+ AccessToken string `json:"access_token"`
+ RefreshToken string `json:"refresh_token"`
+
+ // AAD returns expires_in as a string, ADFS returns it as an int
+ ExpiresIn json.Number `json:"expires_in"`
+ // expires_on can be in two formats, a UTC time stamp or the number of seconds.
+ ExpiresOn string `json:"expires_on"`
+ NotBefore json.Number `json:"not_before"`
+
+ Resource string `json:"resource"`
+ Type string `json:"token_type"`
+ }{}
+ // return a TokenRefreshError in the following error cases as the token is in an unexpected format
+ err = json.Unmarshal(rb, &token)
+ if err != nil {
+ return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp)
+ }
+ expiresOn := json.Number("")
+ // ADFS doesn't include the expires_on field
+ if token.ExpiresOn != "" {
+ if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil {
+ return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp)
+ }
+ }
+ spt.inner.Token.AccessToken = token.AccessToken
+ spt.inner.Token.RefreshToken = token.RefreshToken
+ spt.inner.Token.ExpiresIn = token.ExpiresIn
+ spt.inner.Token.ExpiresOn = expiresOn
+ spt.inner.Token.NotBefore = token.NotBefore
+ spt.inner.Token.Resource = token.Resource
+ spt.inner.Token.Type = token.Type
+
+ return spt.InvokeRefreshCallbacks(spt.inner.Token)
+}
+
+// converts expires_on to the number of seconds
+func parseExpiresOn(s string) (json.Number, error) {
+ // convert the expiration date to the number of seconds from now
+ timeToDuration := func(t time.Time) json.Number {
+ dur := t.Sub(time.Now().UTC())
+ return json.Number(strconv.FormatInt(int64(dur.Round(time.Second).Seconds()), 10))
+ }
+ if _, err := strconv.ParseInt(s, 10, 64); err == nil {
+ // this is the number of seconds case, no conversion required
+ return json.Number(s), nil
+ } else if eo, err := time.Parse(expiresOnDateFormatPM, s); err == nil {
+ return timeToDuration(eo), nil
+ } else if eo, err := time.Parse(expiresOnDateFormat, s); err == nil {
+ return timeToDuration(eo), nil
+ } else {
+ // unknown format
+ return json.Number(""), err
+ }
+}
+
+// retry logic specific to retrieving a token from the IMDS endpoint
+func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) {
+ // copied from client.go due to circular dependency
+ retries := []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ }
+ // extra retry status codes specific to IMDS
+ retries = append(retries,
+ http.StatusNotFound,
+ http.StatusGone,
+ // all remaining 5xx
+ http.StatusNotImplemented,
+ http.StatusHTTPVersionNotSupported,
+ http.StatusVariantAlsoNegotiates,
+ http.StatusInsufficientStorage,
+ http.StatusLoopDetected,
+ http.StatusNotExtended,
+ http.StatusNetworkAuthenticationRequired)
+
+ // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance
+
+ const maxDelay time.Duration = 60 * time.Second
+
+ attempt := 0
+ delay := time.Duration(0)
+
+ // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made
+ if maxAttempts < 1 {
+ maxAttempts = defaultMaxMSIRefreshAttempts
+ }
+
+ for attempt < maxAttempts {
+ if resp != nil && resp.Body != nil {
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ }
+ resp, err = sender.Do(req)
+ // we want to retry if err is not nil or the status code is in the list of retry codes
+ if err == nil && !responseHasStatusCode(resp, retries...) {
+ return
+ }
+
+ // perform exponential backoff with a cap.
+ // must increment attempt before calculating delay.
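+ // for illustration: attempt 1 waits 2s, attempt 2 adds 4s (6s total),
+ // attempt 3 adds 8s (14s total), and so on up to the 60s cap noted above.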
+ attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. +func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. +func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
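+// A construction sketch (multiTenantCfg is assumed to come from a helper such
+// as adal.NewMultiTenantOAuthConfig; the other values are placeholders):
+//
+//	mt, err := adal.NewMultiTenantServicePrincipalToken(
+//		multiTenantCfg, "client-id", "client-secret", "https://management.azure.com/")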
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. +func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalTokenWithSecret( + *multiTenantCfg.PrimaryTenant(), + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalTokenWithSecret( + *auxTenants[i], + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// MSIAvailable returns true if the MSI endpoint is available for authentication. 
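+// Probe sketch (any adal.Sender works here; an *http.Client satisfies it):
+//
+//	if adal.MSIAvailable(context.Background(), http.DefaultClient) {
+//		// managed identity tokens can be requested
+//	}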
+func MSIAvailable(ctx context.Context, sender Sender) bool { + resp, err := getMSIEndpoint(ctx, sender) + if err == nil { + resp.Body.Close() + } + return err == nil +} + +// used for testing purposes +var msiAvailableHook = func(ctx context.Context, sender Sender) bool { + return MSIAvailable(ctx, sender) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 0000000..953f755 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,75 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "fmt" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
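+// Because failures here are wrapped with %w (Go 1.13+), callers can unwrap
+// them (sketch):
+//
+//	var tre adal.TokenRefreshError
+//	if errors.As(err, &tre) {
+//		// inspect tre.Response()
+//	}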
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 0000000..729bfbd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,74 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
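+// Note that in this pre-Go 1.13 build errors are returned unwrapped, so a
+// direct type assertion to adal.TokenRefreshError still works (sketch):
+//
+//	tre, ok := err.(adal.TokenRefreshError)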
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 0000000..c867b34 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 0000000..1226c41 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,353 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +const ( + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" + authorization = "Authorization" + basic = "Basic" +) + +// Authorizer is the interface that provides a PrepareDecorator used to supply request +// authorization. Most often, the Authorizer decorator runs last so it has access to the full +// state of the formed HTTP request. 
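+// A typical use attaches the decorator while preparing a request (sketch,
+// mirroring the package-level example):
+//
+//	req, err := autorest.Prepare(&http.Request{}, authorizer.WithAuthorization())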
+type Authorizer interface { + WithAuthorization() PrepareDecorator +} + +// NullAuthorizer implements a default, "do nothing" Authorizer. +type NullAuthorizer struct{} + +// WithAuthorization returns a PrepareDecorator that does nothing. +func (na NullAuthorizer) WithAuthorization() PrepareDecorator { + return WithNothing() +} + +// APIKeyAuthorizer implements API Key authorization. +type APIKeyAuthorizer struct { + headers map[string]interface{} + queryParameters map[string]interface{} +} + +// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. +func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(headers, nil) +} + +// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. +func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(nil, queryParameters) +} + +// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. +func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer { + return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters. +func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters)) + } +} + +// CognitiveServicesAuthorizer implements authorization for Cognitive Services. +type CognitiveServicesAuthorizer struct { + subscriptionKey string +} + +// NewCognitiveServicesAuthorizer is +func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer { + return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey} +} + +// WithAuthorization is +func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[apiKeyAuthorizerHeader] = csa.subscriptionKey + headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BearerAuthorizer implements the bearer authorization +type BearerAuthorizer struct { + tokenProvider adal.OAuthTokenProvider +} + +// NewBearerAuthorizer crates a BearerAuthorizer using the given token provider +func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer { + return &BearerAuthorizer{tokenProvider: tp} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the token. +// +// By default, the token will be automatically refreshed through the Refresher interface. 
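+// For example (sketch; spt is a previously constructed *adal.ServicePrincipalToken):
+//
+//	authorizer := autorest.NewBearerAuthorizer(spt)
+//	req, err := autorest.Prepare(&http.Request{}, authorizer.WithAuthorization())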
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// TokenProvider returns OAuthTokenProvider so that it can be used for authorization outside the REST. +func (ba *BearerAuthorizer) TokenProvider() adal.OAuthTokenProvider { + return ba.tokenProvider +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(s Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if s == nil { + s = sender(tls.RenegotiateNever) + } + return &BearerAuthorizerCallback{sender: s, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
+ rCopy := *r
+ removeRequestBody(&rCopy)
+
+ resp, err := bacb.sender.Do(&rCopy)
+ if err != nil {
+ return r, err
+ }
+ DrainResponseBody(resp)
+ if resp.StatusCode == 401 && hasBearerChallenge(resp.Header) {
+ bc, err := newBearerChallenge(resp.Header)
+ if err != nil {
+ return r, err
+ }
+ if bacb.callback != nil {
+ ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
+ if err != nil {
+ return r, err
+ }
+ return Prepare(r, ba.WithAuthorization())
+ }
+ }
+ }
+ return r, err
+ })
+ }
+}
+
+// returns true if the HTTP response contains a bearer challenge
+func hasBearerChallenge(header http.Header) bool {
+ authHeader := header.Get(bearerChallengeHeader)
+ if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
+ return false
+ }
+ return true
+}
+
+type bearerChallenge struct {
+ values map[string]string
+}
+
+func newBearerChallenge(header http.Header) (bc bearerChallenge, err error) {
+ challenge := strings.TrimSpace(header.Get(bearerChallengeHeader))
+ trimmedChallenge := challenge[len(bearer)+1:]
+
+ // challenge is a set of key=value pairs that are comma delimited
+ pairs := strings.Split(trimmedChallenge, ",")
+ if len(pairs) < 1 {
+ err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
+ return bc, err
+ }
+
+ bc.values = make(map[string]string)
+ for i := range pairs {
+ trimmedPair := strings.TrimSpace(pairs[i])
+ pair := strings.Split(trimmedPair, "=")
+ if len(pair) == 2 {
+ // remove the enclosing quotes
+ key := strings.Trim(pair[0], "\"")
+ value := strings.Trim(pair[1], "\"")
+
+ switch key {
+ case "authorization", "authorization_uri":
+ // strip the tenant ID from the authorization URL
+ asURL, err := url.Parse(value)
+ if err != nil {
+ return bc, err
+ }
+ bc.values[tenantID] = asURL.Path[1:]
+ default:
+ bc.values[key] = value
+ }
+ }
+ }
+
+ return bc, err
+}
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+ topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+ return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+ headers := map[string]interface{}{
+ "aeg-sas-key": egta.topicKey,
+ }
+ return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic <TOKEN>" where <TOKEN> is a base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+ userName string
+ password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+ return &BasicAuthorizer{
+ userName: userName,
+ password: password,
+ }
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.
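+// For example (placeholder credentials):
+//
+//	ba := autorest.NewBasicAuthorizer("user", "password")
+//	req, err := autorest.Prepare(&http.Request{}, ba.WithAuthorization())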
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. +type MultiTenantServicePrincipalTokenAuthorizer interface { + WithAuthorization() PrepareDecorator +} + +// NewMultiTenantServicePrincipalTokenAuthorizer crates a BearerAuthorizer using the given token provider +func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { + return NewMultiTenantBearerAuthorizer(tp) +} + +// MultiTenantBearerAuthorizer implements bearer authorization across multiple tenants. +type MultiTenantBearerAuthorizer struct { + tp adal.MultitenantOAuthTokenProvider +} + +// NewMultiTenantBearerAuthorizer creates a MultiTenantBearerAuthorizer using the given token provider. +func NewMultiTenantBearerAuthorizer(tp adal.MultitenantOAuthTokenProvider) *MultiTenantBearerAuthorizer { + return &MultiTenantBearerAuthorizer{tp: tp} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the +// primary token along with the auxiliary authorization header using the auxiliary tokens. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (mt *MultiTenantBearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, + "Failed to refresh one or more Tokens for request to %s", r.URL) + } + } + r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) + if err != nil { + return r, err + } + auxTokens := mt.tp.AuxiliaryOAuthTokens() + for i := range auxTokens { + auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) + } + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, ", "))) + }) + } +} + +// TokenProvider returns the underlying MultitenantOAuthTokenProvider for this authorizer. +func (mt *MultiTenantBearerAuthorizer) TokenProvider() adal.MultitenantOAuthTokenProvider { + return mt.tp +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go new file mode 100644 index 0000000..6650149 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_sas.go @@ -0,0 +1,66 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+)
+
+// SASTokenAuthorizer implements an authorization for SAS Token Authentication
+// this can be used for interaction with Blob Storage Endpoints
+type SASTokenAuthorizer struct {
+ sasToken string
+}
+
+// NewSASTokenAuthorizer creates a SASTokenAuthorizer using the given credentials
+func NewSASTokenAuthorizer(sasToken string) (*SASTokenAuthorizer, error) {
+ if strings.TrimSpace(sasToken) == "" {
+ return nil, fmt.Errorf("sasToken cannot be empty")
+ }
+
+ token := sasToken
+ if strings.HasPrefix(sasToken, "?") {
+ token = strings.TrimPrefix(sasToken, "?")
+ }
+
+ return &SASTokenAuthorizer{
+ sasToken: token,
+ }, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds a shared access signature token to the
+// URI's query parameters. This can be used for the Blob, Queue, and File Services.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/delegate-access-with-shared-access-signature
+func (sas *SASTokenAuthorizer) WithAuthorization() PrepareDecorator {
+ return func(p Preparer) Preparer {
+ return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+
+ if r.URL.RawQuery == "" {
+ r.URL.RawQuery = sas.sasToken
+ } else if !strings.Contains(r.URL.RawQuery, sas.sasToken) {
+ r.URL.RawQuery = fmt.Sprintf("%s&%s", r.URL.RawQuery, sas.sasToken)
+ }
+
+ return Prepare(r)
+ })
+ }
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
new file mode 100644
index 0000000..2af5030
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_storage.go
@@ -0,0 +1,307 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strings"
+ "time"
+)
+
+// SharedKeyType defines the enumeration for the various shared key types.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key for details on the shared key types.
+type SharedKeyType string
+
+const (
+ // SharedKey is used to authorize against blobs, files and queues services.
+ SharedKey SharedKeyType = "sharedKey"
+
+ // SharedKeyForTable is used to authorize against the table service.
+ SharedKeyForTable SharedKeyType = "sharedKeyTable"
+
+ // SharedKeyLite is used to authorize against blobs, files and queues services. It's provided for
+ // backwards compatibility with API versions before 2009-09-19. Prefer SharedKey instead.
+ SharedKeyLite SharedKeyType = "sharedKeyLite"
+
+ // SharedKeyLiteForTable is used to authorize against the table service. It's provided for
+ // backwards compatibility with older table API versions. Prefer SharedKeyForTable instead.
+ SharedKeyLiteForTable SharedKeyType = "sharedKeyLiteTable"
+)
+
+const (
+ headerAccept = "Accept"
+ headerAcceptCharset = "Accept-Charset"
+ headerContentEncoding = "Content-Encoding"
+ headerContentLength = "Content-Length"
+ headerContentMD5 = "Content-MD5"
+ headerContentLanguage = "Content-Language"
+ headerIfModifiedSince = "If-Modified-Since"
+ headerIfMatch = "If-Match"
+ headerIfNoneMatch = "If-None-Match"
+ headerIfUnmodifiedSince = "If-Unmodified-Since"
+ headerDate = "Date"
+ headerXMSDate = "X-Ms-Date"
+ headerXMSVersion = "x-ms-version"
+ headerRange = "Range"
+)
+
+const storageEmulatorAccountName = "devstoreaccount1"
+
+// SharedKeyAuthorizer implements an authorization for Shared Key
+// this can be used for interaction with Blob, File and Queue Storage Endpoints
+type SharedKeyAuthorizer struct {
+ accountName string
+ accountKey []byte
+ keyType SharedKeyType
+}
+
+// NewSharedKeyAuthorizer creates a SharedKeyAuthorizer using the provided credentials and shared key type.
+func NewSharedKeyAuthorizer(accountName, accountKey string, keyType SharedKeyType) (*SharedKeyAuthorizer, error) {
+ key, err := base64.StdEncoding.DecodeString(accountKey)
+ if err != nil {
+ return nil, fmt.Errorf("malformed storage account key: %v", err)
+ }
+ return &SharedKeyAuthorizer{
+ accountName: accountName,
+ accountKey: key,
+ keyType: keyType,
+ }, nil
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "<SharedKeyType> " followed by the computed key.
+// This can be used for the Blob, Queue, and File Services
+//
+// from: https://docs.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key
+// You may use Shared Key authorization to authorize a request made against the
+// 2009-09-19 version and later of the Blob and Queue services,
+// and version 2014-02-14 and later of the File services.
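+// Construction sketch (the key must be base64-encoded, as real storage account
+// keys are; names are placeholders):
+//
+//	auth, err := autorest.NewSharedKeyAuthorizer("myaccount", accountKey, autorest.SharedKey)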
+func (sk *SharedKeyAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + + sk, err := buildSharedKey(sk.accountName, sk.accountKey, r, sk.keyType) + if err != nil { + return r, err + } + return Prepare(r, WithHeader(headerAuthorization, sk)) + }) + } +} + +func buildSharedKey(accName string, accKey []byte, req *http.Request, keyType SharedKeyType) (string, error) { + canRes, err := buildCanonicalizedResource(accName, req.URL.String(), keyType) + if err != nil { + return "", err + } + + if req.Header == nil { + req.Header = http.Header{} + } + + // ensure date is set + if req.Header.Get(headerDate) == "" && req.Header.Get(headerXMSDate) == "" { + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(headerXMSDate, date) + } + canString, err := buildCanonicalizedString(req.Method, req.Header, canRes, keyType) + if err != nil { + return "", err + } + return createAuthorizationHeader(accName, accKey, canString, keyType), nil +} + +func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if accountName != storageEmulatorAccountName { + cr.WriteString("/") + cr.WriteString(getCanonicalizedAccountName(accountName)) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. + // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if keyType == SharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func getCanonicalizedAccountName(accountName string) string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers http.Header, canonicalizedResource string, keyType SharedKeyType) (string, error) { + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + date := headers.Get(headerDate) + if v := headers.Get(headerXMSDate); v != "" { + if keyType == SharedKey || keyType == SharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch keyType { + case SharedKey: + canString = 
strings.Join([]string{ + verb, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + canonicalizedResource, + }, "\n") + case SharedKeyLite: + canString = strings.Join([]string{ + verb, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case SharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("key type '%s' is not supported", keyType) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := make(map[string]string) + + for k := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = headers.Get(k) + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string { + h := hmac.New(sha256.New, accountKey) + h.Write([]byte(canonicalizedString)) + signature := base64.StdEncoding.EncodeToString(h.Sum(nil)) + var key string + switch keyType { + case SharedKey, SharedKeyForTable: + key = "SharedKey" + case SharedKeyLite, SharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, getCanonicalizedAccountName(accountName), signature) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 0000000..aafdf02 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
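+//
+// A minimal usage sketch (assuming resp is an initial 202 response whose
+// Location header names the status endpoint, and sender is any autorest.Sender):
+//
+//	cancel := make(chan struct{})
+//	pollReq, err := NewPollingRequest(resp, cancel)
+//	if err != nil {
+//		return err // e.g. the Location header was missing
+//	}
+//	pollResp, err := sender.Do(pollReq)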
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 0000000..42e28cf --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,991 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// FutureAPI contains the set of methods on the Future type. +type FutureAPI interface { + // Response returns the last HTTP response. + Response() *http.Response + + // Status returns the last status message of the operation. + Status() string + + // PollingMethod returns the method used to monitor the status of the asynchronous operation. + PollingMethod() PollingMethodType + + // DoneWithContext queries the service to see if the operation has completed. + DoneWithContext(context.Context, autorest.Sender) (bool, error) + + // GetPollingDelay returns a duration the application should wait before checking + // the status of the asynchronous request and true; this value is returned from + // the service via the Retry-After response header. 
If the header wasn't returned
+	// then the function returns the zero-value time.Duration and false.
+	GetPollingDelay() (time.Duration, bool)
+
+	// WaitForCompletionRef will return when one of the following conditions is met: the long
+	// running operation has completed, the provided context is cancelled, or the client's
+	// polling duration has been exceeded. It will retry failed polling attempts based on
+	// the retry value defined in the client up to the maximum retry attempts.
+	// If no deadline is specified in the context then the client.PollingDuration will be
+	// used to determine if a default deadline should be used.
+	// If PollingDuration is greater than zero the value will be used as the context's timeout.
+	// If PollingDuration is zero then no default deadline will be used.
+	WaitForCompletionRef(context.Context, autorest.Client) error
+
+	// MarshalJSON implements the json.Marshaler interface.
+	MarshalJSON() ([]byte, error)
+
+	// UnmarshalJSON implements the json.Unmarshaler interface.
+	UnmarshalJSON([]byte) error
+
+	// PollingURL returns the URL used for retrieving the status of the long-running operation.
+	PollingURL() string
+
+	// GetResult should be called once polling has completed successfully.
+	// It makes the final GET call to retrieve the resultant payload.
+	GetResult(autorest.Sender) (*http.Response, error)
+}
+
+var _ FutureAPI = (*Future)(nil)
+
+// Future provides a mechanism to access the status and results of an asynchronous request.
+// Since futures are stateful they should be passed by value to avoid race conditions.
+type Future struct {
+	pt pollingTracker
+}
+
+// NewFutureFromResponse returns a new Future object initialized
+// with the initial response from an asynchronous operation.
+func NewFutureFromResponse(resp *http.Response) (Future, error) {
+	pt, err := createPollingTracker(resp)
+	return Future{pt: pt}, err
+}
+
+// Response returns the last HTTP response.
+func (f Future) Response() *http.Response {
+	if f.pt == nil {
+		return nil
+	}
+	return f.pt.latestResponse()
+}
+
+// Status returns the last status message of the operation.
+func (f Future) Status() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingStatus()
+}
+
+// PollingMethod returns the method used to monitor the status of the asynchronous operation.
+func (f Future) PollingMethod() PollingMethodType {
+	if f.pt == nil {
+		return PollingUnknown
+	}
+	return f.pt.pollingMethod()
+}
+
+// DoneWithContext queries the service to see if the operation has completed.
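+//
+// A minimal polling-loop sketch (ctx, resp, and sender are assumed to come
+// from the caller; a production loop would also cap retries, as
+// WaitForCompletionRef does):
+//
+//	future, err := NewFutureFromResponse(resp)
+//	for done := false; err == nil && !done; {
+//		if delay, ok := future.GetPollingDelay(); ok {
+//			time.Sleep(delay)
+//		}
+//		done, err = future.DoneWithContext(ctx, sender)
+//	}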
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
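+//
+// Typical use, sketched (ctx and a configured autorest.Client are assumed;
+// autorest.Client satisfies autorest.Sender, so it can be passed to GetResult
+// as well):
+//
+//	if err := future.WaitForCompletionRef(ctx, client); err != nil {
+//		return err
+//	}
+//	resp, err := future.GetResult(client)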
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
+	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
+	defer func() {
+		sc := -1
+		resp := f.Response()
+		if resp != nil {
+			sc = resp.StatusCode
+		}
+		tracing.EndSpan(ctx, sc, err)
+	}()
+	cancelCtx := ctx
+	// if the provided context already has a deadline don't override it
+	_, hasDeadline := ctx.Deadline()
+	if d := client.PollingDuration; !hasDeadline && d != 0 {
+		var cancel context.CancelFunc
+		cancelCtx, cancel = context.WithTimeout(ctx, d)
+		defer cancel()
+	}
+	// if the initial response has a Retry-After, sleep for the specified amount of time before starting to poll
+	if delay, ok := f.GetPollingDelay(); ok {
+		if delayElapsed := autorest.DelayForBackoff(delay, 0, cancelCtx.Done()); !delayElapsed {
+			err = cancelCtx.Err()
+			return
+		}
+	}
+	done, err := f.DoneWithContext(ctx, client)
+	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
+		if attempts >= client.RetryAttempts {
+			return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
+		}
+		// we want delayAttempt to be zero in the non-error case so
+		// that DelayForBackoff doesn't perform exponential back-off
+		var delayAttempt int
+		var delay time.Duration
+		if err == nil {
+			// check for Retry-After delay, if not present use the client's polling delay
+			var ok bool
+			delay, ok = f.GetPollingDelay()
+			if !ok {
+				delay = client.PollingDelay
+			}
+		} else {
+			// there was an error polling for status so perform exponential
+			// back-off based on the number of attempts using the client's retry
+			// duration. update attempts after delayAttempt to avoid off-by-one.
+			delayAttempt = attempts
+			delay = client.RetryDuration
+			attempts++
+		}
+		// wait until the delay elapses or the context is cancelled
+		delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
+		if !delayElapsed {
+			return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
+		}
+	}
+	return
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (f Future) MarshalJSON() ([]byte, error) {
+	return json.Marshal(f.pt)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (f *Future) UnmarshalJSON(data []byte) error {
+	// unmarshal into JSON object to determine the tracker type
+	obj := map[string]interface{}{}
+	err := json.Unmarshal(data, &obj)
+	if err != nil {
+		return err
+	}
+	if obj["method"] == nil {
+		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
+	}
+	method := obj["method"].(string)
+	switch strings.ToUpper(method) {
+	case http.MethodDelete:
+		f.pt = &pollingTrackerDelete{}
+	case http.MethodPatch:
+		f.pt = &pollingTrackerPatch{}
+	case http.MethodPost:
+		f.pt = &pollingTrackerPost{}
+	case http.MethodPut:
+		f.pt = &pollingTrackerPut{}
+	default:
+		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
+	}
+	// now unmarshal into the tracker
+	return json.Unmarshal(data, &f.pt)
+}
+
+// PollingURL returns the URL used for retrieving the status of the long-running operation.
+func (f Future) PollingURL() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingURL()
+}
+
+// GetResult should be called once polling has completed successfully.
+// It makes the final GET call to retrieve the resultant payload.
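+//
+// A hedged sketch of consuming the final payload (the shape of out depends
+// on the particular service API):
+//
+//	resp, err := future.GetResult(sender)
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()
+//	var out map[string]interface{}
+//	err = json.NewDecoder(resp.Body).Decode(&out)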
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs. in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + resp, err := sender.Do(req) + if err == nil && resp.Body != nil { + // copy the body and close it so callers don't have to + defer resp.Body.Close() + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, err + } + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + return resp, err +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. + initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the 
initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! + pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
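+// A body it can unwrap looks roughly like the following (values illustrative):
+//
+//	{
+//		"error": {
+//			"code": "ResourceDeploymentFailure",
+//			"message": "The operation ended in terminal provisioning state 'Failed'."
+//		}
+//	}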
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + goto Default + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return 
autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. +func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + 
return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+			return err
+		} else if lh != "" {
+			if ao == "" {
+				pt.URI = lh
+				pt.Pm = PollingLocation
+			}
+		}
+		// make sure a polling URL was found
+		if pt.URI == "" {
+			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+		}
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) checkForErrors() error {
+	err := pt.baseCheckForErrors()
+	if err != nil {
+		return err
+	}
+	// if there are no LRO headers then the body cannot be empty
+	ao, err := getURLFromAsyncOpHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	lh, err := getURLFromLocationHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	if ao == "" && lh == "" && len(pt.rawBody) == 0 {
+		return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) provisioningStateApplicable() bool {
+	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
+}
+
+// creates a polling tracker based on the verb of the original request
+func createPollingTracker(resp *http.Response) (pollingTracker, error) {
+	var pt pollingTracker
+	switch strings.ToUpper(resp.Request.Method) {
+	case http.MethodDelete:
+		pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPatch:
+		pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPost:
+		pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPut:
+		pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	default:
+		return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
+	}
+	if err := pt.initializeState(); err != nil {
+		return pt, err
+	}
+	// this initializes the polling header values, we do this during creation in case the
+	// initial response sends us invalid values; this way the API call will return a non-nil
+	// error (not doing this means the error shows up in Future.Done)
+	return pt, pt.updatePollingMethod()
+}
+
+// gets the polling URL from the Azure-AsyncOperation header.
+// ensures the URL is well-formed and absolute.
+func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// gets the polling URL from the Location header.
+// ensures the URL is well-formed and absolute.
+func getURLFromLocationHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// verify that the URL is valid and absolute
+func isValidURL(s string) bool {
+	u, err := url.Parse(s)
+	return err == nil && u.IsAbs()
+}
+
+// PollingMethodType defines a type used for enumerating polling mechanisms.
+type PollingMethodType string
+
+const (
+	// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
+	PollingAsyncOperation PollingMethodType = "AsyncOperation"
+
+	// PollingLocation indicates the polling method uses the Location header.
+	PollingLocation PollingMethodType = "Location"
+
+	// PollingRequestURI indicates the polling method uses the original request URI.
+	PollingRequestURI PollingMethodType = "RequestURI"
+
+	// PollingUnknown indicates an unknown polling method and is the default value.
+	PollingUnknown PollingMethodType = ""
+)
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+	// FutureType is the name of the type composed of an azure.Future.
+	FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+	return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+	return AsyncOpIncompleteError{
+		FutureType: futureType,
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
new file mode 100644
index 0000000..0ded76b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go
@@ -0,0 +1,388 @@
+// Package azure provides Azure-specific implementations used with AutoRest.
+// See the included examples for more detail.
+package azure
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// HeaderClientID is the Azure extension header to set a user-specified request ID.
+	HeaderClientID = "x-ms-client-request-id"
+
+	// HeaderReturnClientID is the Azure extension header to set if the user-specified request ID
+	// should be included in the response.
+	HeaderReturnClientID = "x-ms-return-client-request-id"
+
+	// HeaderContentType is the type of the content in the HTTP response.
+	HeaderContentType = "Content-Type"
+
+	// HeaderRequestID is the Azure extension header of the service generated request ID returned
+	// in the response.
+	HeaderRequestID = "x-ms-request-id"
+)
+
+// ServiceError encapsulates the error response from an Azure service.
+// It adheres to the OData v4 specification for error responses.
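+//
+// For illustration, a response body in the shape this type models (all values
+// invented):
+//
+//	{
+//		"code": "BadArgument",
+//		"message": "The provided database 'foo' has an invalid username.",
+//		"target": "query",
+//		"details": [{"code": "301", "message": "$search query option not supported"}],
+//		"innererror": {"trace": "..."},
+//		"additionalInfo": [{"type": "PolicyViolation", "info": {"policyDefinitionId": "..."}}]
+//	}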
+type ServiceError struct {
+	Code           string                   `json:"code"`
+	Message        string                   `json:"message"`
+	Target         *string                  `json:"target"`
+	Details        []map[string]interface{} `json:"details"`
+	InnerError     map[string]interface{}   `json:"innererror"`
+	AdditionalInfo []map[string]interface{} `json:"additionalInfo"`
+}
+
+func (se ServiceError) Error() string {
+	result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message)
+
+	if se.Target != nil {
+		result += fmt.Sprintf(" Target=%q", *se.Target)
+	}
+
+	if se.Details != nil {
+		d, err := json.Marshal(se.Details)
+		if err != nil {
+			result += fmt.Sprintf(" Details=%v", se.Details)
+		} else {
+			result += fmt.Sprintf(" Details=%v", string(d))
+		}
+	}
+
+	if se.InnerError != nil {
+		d, err := json.Marshal(se.InnerError)
+		if err != nil {
+			result += fmt.Sprintf(" InnerError=%v", se.InnerError)
+		} else {
+			result += fmt.Sprintf(" InnerError=%v", string(d))
+		}
+	}
+
+	if se.AdditionalInfo != nil {
+		d, err := json.Marshal(se.AdditionalInfo)
+		if err != nil {
+			result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo)
+		} else {
+			result += fmt.Sprintf(" AdditionalInfo=%v", string(d))
+		}
+	}
+
+	return result
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type.
+func (se *ServiceError) UnmarshalJSON(b []byte) error {
+	// http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091
+
+	type serviceErrorInternal struct {
+		Code           string                   `json:"code"`
+		Message        string                   `json:"message"`
+		Target         *string                  `json:"target,omitempty"`
+		AdditionalInfo []map[string]interface{} `json:"additionalInfo,omitempty"`
+		// not all services conform to the OData v4 spec.
+		// the following fields are where we've seen discrepancies
+
+		// spec calls for []map[string]interface{} but have seen map[string]interface{}
+		Details interface{} `json:"details,omitempty"`
+
+		// spec calls for map[string]interface{} but have seen []map[string]interface{} and string
+		InnerError interface{} `json:"innererror,omitempty"`
+	}
+
+	sei := serviceErrorInternal{}
+	if err := json.Unmarshal(b, &sei); err != nil {
+		return err
+	}
+
+	// copy the fields we know to be correct
+	se.AdditionalInfo = sei.AdditionalInfo
+	se.Code = sei.Code
+	se.Message = sei.Message
+	se.Target = sei.Target
+
+	// converts an []interface{} to []map[string]interface{}
+	arrayOfObjs := func(v interface{}) ([]map[string]interface{}, bool) {
+		arrayOf, ok := v.([]interface{})
+		if !ok {
+			return nil, false
+		}
+		final := []map[string]interface{}{}
+		for _, item := range arrayOf {
+			as, ok := item.(map[string]interface{})
+			if !ok {
+				return nil, false
+			}
+			final = append(final, as)
+		}
+		return final, true
+	}
+
+	// convert the remaining fields, falling back to raw JSON if necessary
+
+	if c, ok := arrayOfObjs(sei.Details); ok {
+		se.Details = c
+	} else if c, ok := sei.Details.(map[string]interface{}); ok {
+		se.Details = []map[string]interface{}{c}
+	} else if sei.Details != nil {
+		// stuff into Details
+		se.Details = []map[string]interface{}{
+			{"raw": sei.Details},
+		}
+	}
+
+	if c, ok := sei.InnerError.(map[string]interface{}); ok {
+		se.InnerError = c
+	} else if c, ok := arrayOfObjs(sei.InnerError); ok {
+		// if there's only one error extract it
+		if len(c) == 1 {
+			se.InnerError = c[0]
+		} else {
+			// multiple errors, stuff them into the value
+			se.InnerError = map[string]interface{}{
+				"multi": c,
+			}
+		}
+	} else if c, ok := sei.InnerError.(string); ok {
+		se.InnerError = map[string]interface{}{"error": c}
+	} else if sei.InnerError != nil {
+		// stuff into
InnerError
+		se.InnerError = map[string]interface{}{
+			"raw": sei.InnerError,
+		}
+	}
+	return nil
+}
+
+// RequestError describes an error response returned by an Azure service.
+type RequestError struct {
+	autorest.DetailedError
+
+	// The error returned by the Azure service.
+	ServiceError *ServiceError `json:"error" xml:"Error"`
+
+	// The request id (from the x-ms-request-id-header) of the request.
+	RequestID string
+}
+
+// Error returns a human-friendly error message from the service error.
+func (e RequestError) Error() string {
+	return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v",
+		e.StatusCode, e.ServiceError)
+}
+
+// IsAzureError returns true if the passed error is an Azure Service error; false otherwise.
+func IsAzureError(e error) bool {
+	_, ok := e.(*RequestError)
+	return ok
+}
+
+// Resource contains details about an Azure resource.
+type Resource struct {
+	SubscriptionID string
+	ResourceGroup  string
+	Provider       string
+	ResourceType   string
+	ResourceName   string
+}
+
+// String returns the resource ID in the canonical azureResourceID form.
+func (r Resource) String() string {
+	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/%s/%s/%s", r.SubscriptionID, r.ResourceGroup, r.Provider, r.ResourceType, r.ResourceName)
+}
+
+// ParseResourceID parses a resource ID into a Resource struct.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4.
+func ParseResourceID(resourceID string) (Resource, error) {
+
+	const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)`
+	resourceIDPattern := regexp.MustCompile(resourceIDPatternText)
+	match := resourceIDPattern.FindStringSubmatch(resourceID)
+
+	if len(match) == 0 {
+		return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID)
+	}
+
+	v := strings.Split(match[5], "/")
+	resourceName := v[len(v)-1]
+
+	result := Resource{
+		SubscriptionID: match[1],
+		ResourceGroup:  match[2],
+		Provider:       match[3],
+		ResourceType:   match[4],
+		ResourceName:   resourceName,
+	}
+
+	return result, nil
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError {
+	if v, ok := original.(*RequestError); ok {
+		return *v
+	}
+
+	statusCode := autorest.UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+	return RequestError{
+		DetailedError: autorest.DetailedError{
+			Original:    original,
+			PackageType: packageType,
+			Method:      method,
+			StatusCode:  statusCode,
+			Message:     fmt.Sprintf(message, args...),
+		},
+	}
+}
+
+// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id
+// header to true such that UUID accompanies the http.Response.
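+//
+// Sketched use (the UUID below is the sample value from the comment above):
+//
+//	req, err := autorest.Prepare(&http.Request{},
+//		WithReturningClientID("0F39878C-5F76-4DB8-A25D-61D2C193C3CA"))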
+func WithReturningClientID(uuid string) autorest.PrepareDecorator {
+	preparer := autorest.CreatePreparer(
+		WithClientID(uuid),
+		WithReturnClientID(true))
+
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			return preparer.Prepare(r)
+		})
+	}
+}
+
+// WithClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g.,
+// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA").
+func WithClientID(uuid string) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderClientID, uuid)
+}
+
+// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of
+// x-ms-return-client-request-id whose boolean value indicates if the value of the
+// x-ms-client-request-id header should be included in the http.Response.
+func WithReturnClientID(b bool) autorest.PrepareDecorator {
+	return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b))
+}
+
+// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the
+// http.Request sent to the service (and returned in the http.Response)
+func ExtractClientID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderClientID, resp)
+}
+
+// ExtractRequestID extracts the Azure server generated request identifier from the
+// x-ms-request-id header.
+func ExtractRequestID(resp *http.Response) string {
+	return autorest.ExtractHeaderValue(HeaderRequestID, resp)
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an
+// azure.RequestError by reading the response body unless the response HTTP status code
+// is among the set passed.
+//
+// If there is a chance the service may return responses other than the Azure error
+// format and the response cannot be parsed into an error, a decoding error will
+// be returned containing the response body. In any case, the Responder will
+// return an error if the status code is not satisfied.
+//
+// If this Responder returns an error, the response body will be replaced with
+// an in-memory reader, which needs no further closing.
+func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
+	return func(r autorest.Responder) autorest.Responder {
+		return autorest.ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) {
+				var e RequestError
+				defer resp.Body.Close()
+
+				encodedAs := autorest.EncodedAsJSON
+				if strings.Contains(resp.Header.Get("Content-Type"), "xml") {
+					encodedAs = autorest.EncodedAsXML
+				}
+
+				// Copy and replace the Body in case it does not contain an error object.
+				// This will leave the Body available to the caller.
+ b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&e.ServiceError); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err) + } + + // for example, should the API return the literal value `null` as the response + if e.ServiceError == nil { + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + Details: []map[string]interface{}{ + { + "HttpResponse.Body": b.String(), + }, + }, + } + } + } + + if e.ServiceError != nil && e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body in hopes of getting something. + rawBody := map[string]interface{}{} + decoder := autorest.NewDecoder(encodedAs, bytes.NewReader(b.Bytes())) + if err := decoder.Decode(&rawBody); err != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), err) + } + + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 0000000..9bbc089 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,269 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. + NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. 
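+//
+// For example (a sketch), the token audience for storage in the public cloud
+// can be read as:
+//
+//	aud := PublicCloud.ResourceIdentifiers.Storage // "https://storage.azure.com/"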
+type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` + Synapse string `json:"synapse"` + ServiceBus string `json:"serviceBus"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + APIManagementHostNameSuffix string `json:"apiManagementHostNameSuffix"` + SynapseEndpointSuffix string `json:"synapseEndpointSuffix"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + APIManagementHostNameSuffix: "azure-api.net", + SynapseEndpointSuffix: "dev.azuresynapse.net", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: 
"https://servicebus.azure.net/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.usgovcloudapi.net", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + APIManagementHostNameSuffix: "azure-api.us", + SynapseEndpointSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.chinacloudapi.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + APIManagementHostNameSuffix: "azure-api.cn", + SynapseEndpointSuffix: "dev.azuresynapse.azure.cn", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + Synapse: "https://dev.azuresynapse.net", + ServiceBus: "https://servicebus.azure.net/", + }, + } + + // GermanCloud is the 
cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + APIManagementHostNameSuffix: NotAvailable, + SynapseEndpointSuffix: NotAvailable, + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + Synapse: NotAvailable, + ServiceBus: "https://servicebus.azure.net/", + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} + +// SetEnvironment updates the environment map with the specified values. 
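+// A hedged sketch (the environment name is illustrative, not part of this package);
+// lookups via EnvironmentFromName are case-insensitive because the stored key is upper-cased:
+//
+//	custom := PublicCloud
+//	custom.Name = "MyPrivateCloud"
+//	SetEnvironment("MyPrivateCloud", custom)
+//	env, err := EnvironmentFromName("myprivatecloud") // returns the custom environment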
+func SetEnvironment(name string, env Environment) {
+	environments[strings.ToUpper(name)] = env
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
new file mode 100644
index 0000000..507f9e9
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go
@@ -0,0 +1,245 @@
+package azure
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+type audience []string
+
+type authentication struct {
+	LoginEndpoint string   `json:"loginEndpoint"`
+	Audiences     audience `json:"audiences"`
+}
+
+type environmentMetadataInfo struct {
+	GalleryEndpoint string         `json:"galleryEndpoint"`
+	GraphEndpoint   string         `json:"graphEndpoint"`
+	PortalEndpoint  string         `json:"portalEndpoint"`
+	Authentication  authentication `json:"authentication"`
+}
+
+// EnvironmentProperty represents property names that clients can override
+type EnvironmentProperty string
+
+const (
+	// EnvironmentName ...
+	EnvironmentName EnvironmentProperty = "name"
+	// EnvironmentManagementPortalURL ...
+	EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
+	// EnvironmentPublishSettingsURL ...
+	EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
+	// EnvironmentServiceManagementEndpoint ...
+	EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
+	// EnvironmentResourceManagerEndpoint ...
+	EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
+	// EnvironmentActiveDirectoryEndpoint ...
+	EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
+	// EnvironmentGalleryEndpoint ...
+	EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
+	// EnvironmentKeyVaultEndpoint ...
+	EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
+	// EnvironmentGraphEndpoint ...
+	EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
+	// EnvironmentServiceBusEndpoint ...
+	EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
+	// EnvironmentBatchManagementEndpoint ...
+	EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
+	// EnvironmentStorageEndpointSuffix ...
+	EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
+	// EnvironmentSQLDatabaseDNSSuffix ...
+	EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
+	// EnvironmentTrafficManagerDNSSuffix ...
+	EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
+	// EnvironmentKeyVaultDNSSuffix ...
+	EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
+	// EnvironmentServiceBusEndpointSuffix ...
+ EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + 
environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 0000000..c6d39f6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,204 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries
+func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator {
+	return func(s autorest.Sender) autorest.Sender {
+		return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := autorest.NewRetriableRequest(r)
+			for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+
+				resp, err = autorest.SendWithSender(s, rr.Request(),
+					autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+				)
+				if err != nil {
+					return resp, err
+				}
+
+				if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration {
+					return resp, err
+				}
+
+				var re RequestError
+				if strings.Contains(r.Header.Get("Content-Type"), "xml") {
+					// XML errors (e.g. Storage Data Plane) only return the inner object
+					err = autorest.Respond(resp, autorest.ByUnmarshallingXML(&re.ServiceError))
+				} else {
+					err = autorest.Respond(resp, autorest.ByUnmarshallingJSON(&re))
+				}
+
+				if err != nil {
+					return resp, err
+				}
+				err = re
+
+				if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" {
+					regErr := register(client, r, re)
+					if regErr != nil {
+						return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err)
+					}
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+func getProvider(re RequestError) (string, error) {
+	if re.ServiceError != nil && len(re.ServiceError.Details) > 0 {
+		return re.ServiceError.Details[0]["target"].(string), nil
+	}
+	return "", errors.New("provider was not found in the response")
+}
+
+func register(client autorest.Client, originalReq *http.Request, re RequestError) error {
+	subID := getSubscription(originalReq.URL.Path)
+	if subID == "" {
+		return errors.New("missing parameter subscriptionID to register resource provider")
+	}
+	providerName, err := getProvider(re)
+	if err != nil {
+		return fmt.Errorf("missing parameter provider to register resource provider: %s", err)
+	}
+	newURL := url.URL{
+		Scheme: originalReq.URL.Scheme,
+		Host:   originalReq.URL.Host,
+	}
+
+	// taken from the resources SDK
+	// with almost identical code, these sections are easier to maintain
+	// It is also not a good idea to import the SDK here
+	// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252
+	pathParameters := map[string]interface{}{
+		"resourceProviderNamespace": autorest.Encode("path", providerName),
+		"subscriptionId":            autorest.Encode("path", subID),
+	}
+
+	const APIVersion = "2016-09-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(newURL.String()),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
+		autorest.WithQueryParameters(queryParameters),
+	)
+
+	req, err := preparer.Prepare(&http.Request{})
+	if err != nil {
+		return err
+	}
+	req = req.WithContext(originalReq.Context())
+
+	resp, err := autorest.SendWithSender(client, req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+	)
+	if err != nil {
+		return err
+	}
+
+	type Provider struct {
+		RegistrationState *string `json:"registrationState,omitempty"`
+	}
+	var provider Provider
+
+	err = autorest.Respond(
+		resp,
+		WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&provider),
+		autorest.ByClosing(),
+	)
+	if err != nil {
+		return err
+	}
+
+	// poll for registered provisioning state
+	registrationStartTime := time.Now()
+	for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
+		// taken from the resources SDK
+		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
+		preparer := autorest.CreatePreparer(
+			autorest.AsGet(),
+			autorest.WithBaseURL(newURL.String()),
+			autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
+			autorest.WithQueryParameters(queryParameters),
+		)
+		req, err = preparer.Prepare(&http.Request{})
+		if err != nil {
+			return err
+		}
+		req = req.WithContext(originalReq.Context())
+
+		resp, err := autorest.SendWithSender(client, req,
+			autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...),
+		)
+		if err != nil {
+			return err
+		}
+
+		err = autorest.Respond(
+			resp,
+			WithErrorUnlessStatusCode(http.StatusOK),
+			autorest.ByUnmarshallingJSON(&provider),
+			autorest.ByClosing(),
+		)
+		if err != nil {
+			return err
+		}
+
+		if provider.RegistrationState != nil &&
+			*provider.RegistrationState == "Registered" {
+			break
+		}
+
+		delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done())
+		if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) {
+			return originalReq.Context().Err()
+		}
+	}
+	if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
+		return errors.New("polling for resource provider registration has exceeded the polling duration")
+	}
+	return err
+}
+
+func getSubscription(path string) string {
+	parts := strings.Split(path, "/")
+	for i, v := range parts {
+		if v == "subscriptions" && (i+1) < len(parts) {
+			return parts[i+1]
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
new file mode 100644
index 0000000..0b7525f
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -0,0 +1,328 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/Azure/go-autorest/logger"
+)
+
+const (
+	// DefaultPollingDelay is a reasonable delay between polling requests.
+	DefaultPollingDelay = 60 * time.Second
+
+	// DefaultPollingDuration is a reasonable total polling duration.
+	DefaultPollingDuration = 15 * time.Minute
+
+	// DefaultRetryAttempts is the number of attempts for retry status codes (5xx).
+	DefaultRetryAttempts = 3
+
+	// DefaultRetryDuration is the duration to wait between retries.
+	DefaultRetryDuration = 30 * time.Second
+)
+
+var (
+	// StatusCodesForRetry are a defined group of status codes for which the client will retry
+	StatusCodesForRetry = []int{
+		http.StatusRequestTimeout,      // 408
+		http.StatusTooManyRequests,     // 429
+		http.StatusInternalServerError, // 500
+		http.StatusBadGateway,          // 502
+		http.StatusServiceUnavailable,  // 503
+		http.StatusGatewayTimeout,      // 504
+	}
+)
+
+const (
+	requestFormat = `HTTP Request Begin ===================================================
+%s
+===================================================== HTTP Request End
+`
+	responseFormat = `HTTP Response Begin ===================================================
+%s
+===================================================== HTTP Response End
+`
+)
+
+// Response serves as the base for all responses from generated clients. It provides access to the
+// last http.Response.
+type Response struct {
+	*http.Response `json:"-"`
+}
+
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code.
+// If there was no response (i.e. the underlying http.Response is nil) the return value is false.
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
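+//
+// A minimal wiring sketch (illustrative only; the logger destination and user agent
+// are assumptions, not part of this package):
+//
+//	li := autorest.LoggingInspector{Logger: log.New(os.Stderr, "azure: ", log.LstdFlags)}
+//	c := autorest.NewClientWithUserAgent("example-agent")
+//	c.RequestInspector = li.WithInspection()
+//	c.ResponseInspector = li.ByInspecting()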
+func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration. + PollingDuration time.Duration + + // RetryAttempts sets the total number of times the client will attempt to make an HTTP request. + // Set the value to 1 to disable retries. DO NOT set the value to less than 1. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool + + // SendDecorators can be used to override the default chain of SendDecorators. + // This can be used to specify things like a custom retry SendDecorator. + // Set this to an empty slice to use no SendDecorators. + SendDecorators []SendDecorator +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. 
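+//
+// For example, a hedged sketch (the values are illustrative):
+//
+//	c := autorest.NewClientWithOptions(autorest.ClientOptions{
+//		UserAgent:     "myapp/1.0",
+//		Renegotiation: tls.RenegotiateOnceAsClient,
+//	})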
+func NewClientWithOptions(options ClientOptions) Client {
+	return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
+	c := Client{
+		PollingDelay:    DefaultPollingDelay,
+		PollingDuration: DefaultPollingDuration,
+		RetryAttempts:   DefaultRetryAttempts,
+		RetryDuration:   DefaultRetryDuration,
+		UserAgent:       UserAgent(),
+	}
+	c.Sender = c.sender(renegotiation)
+	c.AddToUserAgent(ua)
+	return c
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, apply the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if r.UserAgent() == "" {
+		r, _ = Prepare(r,
+			WithUserAgent(c.UserAgent))
+	}
+	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+	r, err := Prepare(r,
+		c.WithAuthorization(),
+		c.WithInspection())
+	if err != nil {
+		var resp *http.Response
+		if detErr, ok := err.(DetailedError); ok {
+			// if the authorization failed (e.g. invalid credentials) there will
+			// be a response associated with the error, be sure to return it.
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+	logger.Instance.WriteRequest(r, logger.Filter{
+		Header: func(k string, v []string) (bool, []string) {
+			// remove the auth token from the log
+			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+				v = []string{"**REDACTED**"}
+			}
+			return true, v
+		},
+	})
+	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+	if resp == nil && err == nil {
+		err = errors.New("autorest: received nil response and error")
+	}
+	logger.Instance.WriteResponse(resp, logger.Filter{})
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender {
+	if c.Sender == nil {
+		return sender(renegotiation)
+	}
+	return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+	return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+	if c.Authorizer == nil {
+		return NullAuthorizer{}
+	}
+	return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+	if c.RequestInspector == nil {
+		return WithNothing()
+	}
+	return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
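+//
+// For example, Do composes it into response handling as:
+//
+//	Respond(resp, c.ByInspecting())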
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} + +// Send sends the provided http.Request using the client's Sender or the default sender. +// It returns the http.Response and possible error. It also accepts a, possibly empty, +// default set of SendDecorators used when sending the request. +// SendDecorators have the following precedence: +// 1. In a request's context via WithSendDecorators() +// 2. Specified on the client in SendDecorators +// 3. The default values specified in this method +func (c Client) Send(req *http.Request, decorators ...SendDecorator) (*http.Response, error) { + if c.SendDecorators != nil { + decorators = c.SendDecorators + } + inCtx := req.Context().Value(ctxSendDecorators{}) + if sd, ok := inCtx.([]SendDecorator); ok { + decorators = sd + } + return SendWithSender(c, req, decorators...) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 0000000..b9d6a27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+   whether in tort (including negligence), contract, or otherwise,
+   unless required by applicable law (such as deliberate and grossly
+   negligent acts) or agreed to in writing, shall any Contributor be
+   liable to You for damages, including any direct, indirect, special,
+   incidental, or consequential damages of any character arising as a
+   result of this License or out of the use or inability to use the
+   Work (including but not limited to damages for loss of goodwill,
+   work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses), even if such Contributor
+   has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+   the Work or Derivative Works thereof, You may choose to offer,
+   and charge a fee for, acceptance of support, warranty, indemnity,
+   or other liability obligations and/or rights consistent with this
+   License. However, in accepting such obligations, You may act only
+   on Your own behalf and on Your sole responsibility, not on behalf
+   of any other Contributor, and only if You agree to indemnify,
+   defend, and hold each Contributor harmless for any liability
+   incurred by, or claims asserted against, such Contributor by reason
+   of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2015 Microsoft Corporation
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
new file mode 100644
index 0000000..c457106
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
@@ -0,0 +1,96 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
+time.Time types, and both convert to time.Time through a ToTime method.
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	fullDate     = "2006-01-02"
+	fullDateJSON = `"2006-01-02"`
+	dateFormat   = "%04d-%02d-%02d"
+	jsonFormat   = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+	time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
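+//
+// For example (an illustrative sketch; the date value is an assumption):
+//
+//	d, err := date.ParseDate("2021-04-18") // d.String() == "2021-04-18"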
+func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 0000000..f88ecc4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.sum b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum new file mode 100644 index 0000000..1fc56a9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 0000000..4e05432 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 0000000..b453fad --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). 
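+//
+// Both of the following parse (a hedged sketch; the timestamps are illustrative):
+//
+//	var t date.Time
+//	_ = t.UnmarshalText([]byte("2021-04-18T10:00:00"))  // Azure UTC form without zone suffix
+//	_ = t.UnmarshalText([]byte("2021-04-18T10:00:00Z")) // RFC3339 form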
+func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 0000000..48fb39b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). 
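+//
+// A round-trip sketch (illustrative): MarshalBinary simply reuses MarshalText,
+// so the binary form equals the textual form:
+//
+//	b, _ := t.MarshalBinary()
+//	var t2 date.TimeRFC1123
+//	_ = t2.UnmarshalBinary(b)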
+func (t TimeRFC1123) MarshalBinary() ([]byte, error) {
+	return t.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time
+// (i.e., Mon, 02 Jan 2006 15:04:05 MST).
+func (t *TimeRFC1123) UnmarshalBinary(data []byte) error {
+	return t.UnmarshalText(data)
+}
+
+// ToTime returns a Time as a time.Time
+func (t TimeRFC1123) ToTime() time.Time {
+	return t.Time
+}
+
+// String returns the Time formatted as an RFC1123 date-time string (i.e.,
+// Mon, 02 Jan 2006 15:04:05 MST).
+func (t TimeRFC1123) String() string {
+	// Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does.
+	b, err := t.MarshalText()
+	if err != nil {
+		return ""
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
new file mode 100644
index 0000000..7073959
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go
@@ -0,0 +1,123 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+	return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+	return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+	return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+	return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements
+// (i.e. the number of seconds since midnight January 1st, 1970, not considering leap seconds).
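+//
+// For example (a hedged sketch):
+//
+//	t := date.NewUnixTimeFromSeconds(0) // the Unix Epoch itself
+//	b, _ := t.MarshalJSON()             // encodes the JSON number 0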
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+	buffer := &bytes.Buffer{}
+	enc := json.NewEncoder(buffer)
+	err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+	if err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+	dec := json.NewDecoder(bytes.NewReader(text))
+
+	var secondsSinceEpoch float64
+	if err := dec.Decode(&secondsSinceEpoch); err != nil {
+		return err
+	}
+
+	*t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+	return nil
+}
+
+// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number.
+func (t UnixTime) MarshalText() ([]byte, error) {
+	cast := time.Time(t)
+	return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch.
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+	var nanosecondsSinceEpoch int64
+
+	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+		return err
+	}
+	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 0000000..12addf0
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"strings"
+	"time"
+)
+
+// ParseTime parses a time string using the specified format.
+func ParseTime(format string, t string) (d time.Time, err error) {
+	return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 0000000..35098ed
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,103 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+)
+
+const (
+	// UndefinedStatusCode is used when HTTP status code is not available for an error.
+	UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+	Original error
+
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+	StatusCode interface{}
+
+	// Message is the error message.
+	Message string
+
+	// ServiceError is the response body of the failed API call, in bytes.
+	ServiceError []byte
+
+	// Response is the response object that was returned during failure if applicable.
+	Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+	if v, ok := original.(DetailedError); ok {
+		return v
+	}
+
+	statusCode := UndefinedStatusCode
+	if resp != nil {
+		statusCode = resp.StatusCode
+	}
+
+	return DetailedError{
+		Original:    original,
+		PackageType: packageType,
+		Method:      method,
+		StatusCode:  statusCode,
+		Message:     fmt.Sprintf(message, args...),
+		Response:    resp,
+	}
+}
+
+// Error returns a formatted string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and original error (if any)).
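+//
+// Editor's note: a minimal illustration, not part of the upstream source; the
+// package and method names are hypothetical.
+//
+//	e := autorest.NewError("mypackage", "MyMethod", "request failed")
+//	fmt.Println(e.Error())
+//	// Output: mypackage#MyMethod: request failed: StatusCode=0
+//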
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} + +// Unwrap returns the original error. +func (e DetailedError) Unwrap() error { + return e.Original +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 0000000..fd0b2c0 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest v14.2.0+incompatible + github.com/Azure/go-autorest/autorest/adal v0.9.13 + github.com/Azure/go-autorest/autorest/mocks v0.4.1 + github.com/Azure/go-autorest/logger v0.2.1 + github.com/Azure/go-autorest/tracing v0.6.0 + golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 0000000..373d9c4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,23 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go new file mode 100644 index 0000000..da65e10 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 0000000..98574a4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,547 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. +func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. 
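+//
+// Editor's note: an illustrative sketch, not part of the upstream source,
+// showing decorators stored via WithPrepareDecorators being retrieved with a
+// fallback default.
+//
+//	ctx := autorest.WithPrepareDecorators(context.Background(),
+//		[]autorest.PrepareDecorator{autorest.AsJSON()})
+//	pds := autorest.GetPrepareDecorators(ctx, autorest.WithNothing()) // returns the stored AsJSON decorator
+//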
+func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator {
+	inCtx := ctx.Value(ctxPrepareDecorators{})
+	if pd, ok := inCtx.([]PrepareDecorator); ok {
+		return pd
+	}
+	return defaultPrepareDecorators
+}
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must not share or hold per-invocation state since Preparers may be shared and re-used.
+type Preparer interface {
+	Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a method that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+	return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+	return DecoratePreparer(
+		Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+		decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+	for _, decorate := range decorators {
+		p = decorate(p)
+	}
+	return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request.
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
+	if r == nil {
+		return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
+	}
+	return CreatePreparer(decorators...).Prepare(r)
+}
+
+// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
+// http.Request.
+func WithNothing() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			return p.Prepare(r)
+		})
+	}
+}
+
+// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
+// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
+// adding the header.
+func WithHeader(header string, value string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				setHeader(r, http.CanonicalHeaderKey(header), value)
+			}
+			return r, err
+		})
+	}
+}
+
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before
+// adding them.
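+//
+// Editor's note: an illustrative sketch, not part of the upstream source; the
+// header names and values are hypothetical.
+//
+//	r, err := autorest.Prepare(&http.Request{},
+//		autorest.WithHeaders(map[string]interface{}{
+//			"Accept":       "application/json",
+//			"x-ms-version": "2017-04-17",
+//		}))
+//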
+func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. Query parameters will be encoded as required. 
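+//
+// Editor's note: an illustrative sketch, not part of the upstream source; the
+// URL is hypothetical.
+//
+//	r, err := autorest.Prepare(&http.Request{},
+//		autorest.AsGet(),
+//		autorest.WithBaseURL("https://example.com/api"))
+//	// r.Method is "GET" and r.URL points at https://example.com/api
+//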
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					return r, fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if u.RawQuery != "" {
+					q, err := url.ParseQuery(u.RawQuery)
+					if err != nil {
+						return r, err
+					}
+					u.RawQuery = q.Encode()
+				}
+				r.URL = u
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBytes returns a PrepareDecorator that takes a list of bytes and passes
+// them directly to the request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if input == nil {
+					return r, fmt.Errorf("Input Bytes was nil")
+				}
+
+				r.ContentLength = int64(len(*input))
+				r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the given url.Values (e.g.,
+// bar=baz&foo=quux) into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+
+				setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the supplied form parameters
+// into the http.Request body as multipart/form-data.
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var body bytes.Buffer
+				writer := multipart.NewWriter(&body)
+				for key, value := range formDataParameters {
+					if rc, ok := value.(io.ReadCloser); ok {
+						var fd io.Writer
+						if fd, err = writer.CreateFormFile(key, key); err != nil {
+							return r, err
+						}
+						if _, err = io.Copy(fd, rc); err != nil {
+							return r, err
+						}
+					} else {
+						if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+							return r, err
+						}
+					}
+				}
+				if err = writer.Close(); err != nil {
+					return r, err
+				}
+				setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+				r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+				r.ContentLength = int64(body.Len())
+				return r, err
+			}
+			return r, err
+		})
+	}
+}
+
+// WithFile returns a PrepareDecorator that sends the file in the request body.
+func WithFile(f io.ReadCloser) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := ioutil.ReadAll(f)
+				if err != nil {
+					return r, err
+				}
+				r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				r.ContentLength = int64(len(b))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
+// and sets the Content-Length header.
+func WithBool(v bool) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
+// request and sets the Content-Length header.
+func WithFloat32(v float32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
+// request and sets the Content-Length header.
+func WithFloat64(v float64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
+// and sets the Content-Length header.
+func WithInt32(v int32) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
+// and sets the Content-Length header.
+func WithInt64(v int64) PrepareDecorator {
+	return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
+// and sets the Content-Length header.
+func WithString(v string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				r.ContentLength = int64(len(v))
+				r.Body = ioutil.NopCloser(strings.NewReader(v))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the
+// request and sets the Content-Length header.
+func WithJSON(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := json.Marshal(v)
+				if err == nil {
+					r.ContentLength = int64(len(b))
+					r.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the
+// request and sets the Content-Length header.
+func WithXML(v interface{}) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				b, err := xml.Marshal(v)
+				if err == nil {
+					// we have to tack on an XML header
+					withHeader := xml.Header + string(b)
+					bytesWithHeader := []byte(withHeader)
+
+					r.ContentLength = int64(len(bytesWithHeader))
+					setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader)))
+					r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. The path
+// is appended to any path already present in the URL.
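+//
+// Editor's note: an illustrative sketch, not part of the upstream source; host
+// and path are hypothetical.
+//
+//	r, err := autorest.Prepare(&http.Request{},
+//		autorest.WithBaseURL("https://example.com"),
+//		autorest.WithPath("subscriptions/1234/resources"))
+//	// r.URL is https://example.com/subscriptions/1234/resources
+//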
+func WithPath(path string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPath", "Invoked with a nil URL")
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The
+// values will be escaped (aka URL encoded) before insertion into the path.
+func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := escapeValueStrings(ensureValueStrings(pathParameters))
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map.
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(pathParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+	p := strings.TrimRight(u.String(), "/")
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
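+//
+// Editor's note: an illustrative sketch, not part of the upstream source; the
+// parameter values are hypothetical.
+//
+//	r, err := autorest.Prepare(&http.Request{},
+//		autorest.WithBaseURL("https://example.com"),
+//		autorest.WithQueryParameters(map[string]interface{}{"api-version": "2021-04-01"}))
+//	// r.URL.RawQuery is "api-version=2021-04-01"
+//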
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+	parameters := MapToValues(queryParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+				}
+				v := r.URL.Query()
+				for key, value := range parameters {
+					for i := range value {
+						d, err := url.QueryUnescape(value[i])
+						if err != nil {
+							return r, err
+						}
+						value[i] = d
+					}
+					v[key] = value
+				}
+				r.URL.RawQuery = v.Encode()
+			}
+			return r, err
+		})
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 0000000..349e196
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must not share or hold
+// state since Responders may be shared and re-used.
+type Responder interface {
+	Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+	return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share
+// and re-use: it depends on the applied decorators. For example, a standard decorator that closes
+// the response body is fine to share whereas a decorator that reads the body into a passed struct
+// is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
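+//
+// Editor's note: an illustrative sketch, not part of the upstream source,
+// showing how decorated Responders are typically exercised via Respond; the
+// target struct is hypothetical.
+//
+//	var v struct{ Name string }
+//	err := autorest.Respond(resp,
+//		autorest.WithErrorUnlessStatusCode(http.StatusOK),
+//		autorest.ByUnmarshallingJSON(&v),
+//		autorest.ByClosing())
+//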
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. +func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. +func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingBytes returns a RespondDecorator that copies the Bytes returned in the +// response Body into the value pointed to by v. 
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer,
+// surfaced in the returned error, and restored on the response so it remains readable.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
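+//
+// Editor's note: an illustrative sketch, not part of the upstream source.
+//
+//	values := autorest.ExtractHeader("Retry-After", resp)     // nil when resp is nil or the header is absent
+//	first := autorest.ExtractHeaderValue("Retry-After", resp) // "" when absent
+//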
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 0000000..fa11dbe
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 0000000..7143cc6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 0000000..ae15c6b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 0000000..78610ef
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,447 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"net/http/cookiejar"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// there is one sender per TLS renegotiation type, i.e. count of tls.RenegotiationSupport enums
+const defaultSendersCount = 3
+
+type defaultSender struct {
+	sender Sender
+	init   *sync.Once
+}
+
+// each type of sender will be created on demand in sender()
+var defaultSenders [defaultSendersCount]defaultSender
+
+func init() {
+	for i := 0; i < defaultSendersCount; i++ {
+		defaultSenders[i].init = &sync.Once{}
+	}
+}
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(tls.RenegotiateNever), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
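+//
+// Editor's note: an illustrative sketch, not part of the upstream source,
+// combining Send with a status-code check decorator.
+//
+//	resp, err := autorest.Send(r,
+//		autorest.DoErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+//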
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(sender(tls.RenegotiateNever), r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+func sender(renegotiation tls.RenegotiationSupport) Sender {
+	// note that we can't init defaultSenders in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenders[renegotiation].init.Do(func() {
+		// Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+		defaultTransport := http.DefaultTransport.(*http.Transport)
+		transport := &http.Transport{
+			Proxy:                 defaultTransport.Proxy,
+			DialContext:           defaultTransport.DialContext,
+			MaxIdleConns:          defaultTransport.MaxIdleConns,
+			IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+			TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+			ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+			TLSClientConfig: &tls.Config{
+				MinVersion:    tls.VersionTLS12,
+				Renegotiation: renegotiation,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSenders[renegotiation].sender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSenders[renegotiation].sender
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by cancelling the request's context.
+// If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Context().Done()) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err != nil {
+				Respond(resp, ByDiscardingBody(), ByClosing())
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
+// passed status codes. It expects the http.Response to contain a Location header providing the
+// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
+// the supplied duration. It will delay between requests for the duration specified in the
+// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
+// cancelling the request's context.
+func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			resp, err = s.Do(r)
+
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				r, err = NewPollingRequestWithContext(r.Context(), resp)
+
+				for err == nil && ResponseHasStatusCode(resp, codes...) {
+					Respond(resp,
+						ByDiscardingBody(),
+						ByClosing())
+					resp, err = SendWithSender(s, r,
+						AfterDelay(GetRetryAfter(resp, delay)))
+				}
+			}
+
+			return resp, err
+		})
+	}
+}
+
+// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the request's context.
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			for attempt := 0; attempt < attempts; attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// Count429AsRetry indicates that a 429 response should be included as a retry attempt.
+var Count429AsRetry = true
+
+// Max429Delay is the maximum duration to wait between retries on a 429 if no Retry-After header was received.
+var Max429Delay time.Duration
+
+// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
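+//
+// Editor's note: an illustrative sketch, not part of the upstream source; the
+// attempt count and backoff are hypothetical. It retries throttled (429) and
+// server-error responses through a caller-supplied Sender.
+//
+//	resp, err := autorest.SendWithSender(&http.Client{}, r,
+//		autorest.DoRetryForStatusCodes(3, time.Second,
+//			http.StatusTooManyRequests, http.StatusInternalServerError))
+//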
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, 0, codes...) + }) + } +} + +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, Count429AsRetry, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt, delayCount := 0, 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + DrainResponseBody(resp) + resp, err = s.Do(rr.Request()) + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + // if this was a 429 set the delay cap as specified. + // applicable only in the absence of a retry-after header. + if resp != nil && resp.StatusCode == http.StatusTooManyRequests { + cap = Max429Delay + } + if !delayed && !DelayForBackoffWithCap(backoff, cap, delayCount, r.Context().Done()) { + return resp, r.Context().Err() + } + // when count429 == false don't count a 429 against the number + // of attempts so that we continue to retry until it succeeds + if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { + attempt++ + } + // delay count is tracked separately from attempts to + // ensure that 429 participates in exponential back-off + delayCount++ + } + return resp, err +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. +// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. +// The function returns true after successfully waiting for the specified duration. If there is +// no Retry-After header or the wait is cancelled the return value is false. 
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = t.Sub(time.Now())
+	}
+	if dur > 0 {
+		select {
+		case <-time.After(dur):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
+
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by cancelling the
+// request's context.
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				DrainResponseBody(resp)
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
+// returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
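+//
+// Editor's note: an illustrative sketch, not part of the upstream source. With
+// a 1s backoff and attempt 2 the raw delay is 4s; a 3s cap truncates it.
+//
+//	ok := autorest.DelayForBackoffWithCap(time.Second, 3*time.Second, 2, ctx.Done()) // waits 3s, returns true
+//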
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } + select { + case <-time.After(d): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 0000000..3467b8f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,232 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "reflect" + "strings" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. 
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. +func MapToValues(m map[string]interface{}) url.Values { + v := url.Values{} + for key, value := range m { + x := reflect.ValueOf(value) + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + for i := 0; i < x.Len(); i++ { + v.Add(key, ensureValueString(x.Index(i))) + } + } else { + v.Add(key, ensureValueString(value)) + } + } + return v +} + +// AsStringSlice method converts interface{} to []string. +// s must be of type slice or array or an error is returned. +// Each element of s will be converted to its string representation. +func AsStringSlice(s interface{}) ([]string, error) { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + return nil, NewError("autorest", "AsStringSlice", "the value's type is not a slice or array.") + } + stringSlice := make([]string, 0, v.Len()) + + for i := 0; i < v.Len(); i++ { + stringSlice = append(stringSlice, fmt.Sprintf("%v", v.Index(i))) + } + return stringSlice, nil +} + +// String method converts interface v to string. If interface is a list, it +// joins list elements using the separator. Note that only sep[0] will be used for +// joining if any separator is specified. +func String(v interface{}, sep ...string) string { + if len(sep) == 0 { + return ensureValueString(v) + } + stringSlice, ok := v.([]string) + if ok == false { + var err error + stringSlice, err = AsStringSlice(v) + if err != nil { + panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err)) + } + } + return ensureValueString(strings.Join(stringSlice, sep[0])) +} + +// Encode method encodes url path and query parameters. +func Encode(location string, v interface{}, sep ...string) string { + s := String(v, sep...) + switch strings.ToLower(location) { + case "path": + return pathEscape(s) + case "query": + return queryEscape(s) + default: + return s + } +} + +func pathEscape(s string) string { + return strings.Replace(url.QueryEscape(s), "+", "%20", -1) +} + +func queryEscape(s string) string { + return url.QueryEscape(s) +} + +// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't). +// This is mainly useful for long-running operations that use the Azure-AsyncOperation +// header, so we change the initial PUT into a GET to retrieve the final result. 
+func ChangeToGet(req *http.Request) *http.Request { + req.Method = "GET" + req.Body = nil + req.ContentLength = 0 + req.Header.Del("Content-Length") + return req +} + +// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false +// if it's not. If the error doesn't implement the net.Error interface the return value is true. +func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} + +// DrainResponseBody reads the response body then closes it. +func DrainResponseBody(resp *http.Response) error { + if resp != nil && resp.Body != nil { + _, err := io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + return err + } + return nil +} + +func setHeader(r *http.Request, key, value string) { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(key, value) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go new file mode 100644 index 0000000..4cb5e68 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_1.13.go @@ -0,0 +1,29 @@ +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "errors" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError interface. +func IsTokenRefreshError(err error) bool { + var tre adal.TokenRefreshError + return errors.As(err, &tre) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go new file mode 100644 index 0000000..ebb51b4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_legacy.go @@ -0,0 +1,31 @@ +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import "github.com/Azure/go-autorest/autorest/adal" + +// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError +// interface. If err is a DetailedError it will walk the chain of Original errors. 
+func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true + } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 0000000..713e235 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v14.2.1" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml new file mode 100644 index 0000000..6fb8404 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml @@ -0,0 +1,105 @@ +variables: + GOPATH: '$(system.defaultWorkingDirectory)/work' + sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)' + +jobs: + - job: 'goautorest' + displayName: 'Run go-autorest CI Checks' + + strategy: + matrix: + Linux_Go113: + vm.image: 'ubuntu-18.04' + go.version: '1.13' + Linux_Go114: + vm.image: 'ubuntu-18.04' + go.version: '1.14' + + pool: + vmImage: '$(vm.image)' + + steps: + - task: GoTool@0 + inputs: + version: '$(go.version)' + displayName: "Select Go Version" + + - script: | + set -e + mkdir -p '$(GOPATH)/bin' + mkdir -p '$(sdkPath)' + shopt -s extglob + mv !(work) '$(sdkPath)' + echo '##vso[task.prependpath]$(GOPATH)/bin' + displayName: 'Create Go Workspace' + + - script: | + set -e + curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure -v + go install ./vendor/golang.org/x/lint/golint + go get github.com/jstemmer/go-junit-report + go get github.com/axw/gocov/gocov + go get github.com/AlekSi/gocov-xml + go get -u github.com/matm/gocov-html + workingDirectory: '$(sdkPath)' + displayName: 'Install Dependencies' + + - script: | + go vet ./autorest/... + go vet ./logger/... + go vet ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 
2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 0000000..99ae6ca --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 0000000..b9d6a27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
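
The logger module added below is configured entirely through environment variables read at package init. The following is a minimal usage sketch, not part of the patch itself; it assumes the vendored import path shown in the diff and that AZURE_GO_SDK_LOG_LEVEL (and optionally AZURE_GO_SDK_LOG_FILE) are exported before the process starts, since the package reads them in init().

    package main

    import (
    	"net/http"

    	"github.com/Azure/go-autorest/logger"
    )

    func main() {
    	// logger.Instance is the package-level Writer. It is the no-op nilLogger
    	// unless AZURE_GO_SDK_LOG_LEVEL was set (e.g. to INFO or DEBUG) when the
    	// process started, so calling it unconditionally is safe.
    	logger.Instance.Writef(logger.LogInfo, "configured level: %s\n", logger.Level())

    	// An empty Filter excludes nothing; supply URL/Header/Body funcs to scrub secrets.
    	req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
    	logger.Instance.WriteRequest(req, logger.Filter{})
    }
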
diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 0000000..bedeaee --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/logger/go.sum b/vendor/github.com/Azure/go-autorest/logger/go.sum new file mode 100644 index 0000000..1fc56a9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 0000000..0aa2768 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 0000000..2f5d8cc --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,337 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. 
+type LevelType uint32
+
+const (
+	// LogNone tells a logger not to log any entries passed to it.
+	LogNone LevelType = iota
+
+	// LogFatal tells a logger to log all LogFatal entries passed to it.
+	LogFatal
+
+	// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
+	LogPanic
+
+	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
+	LogError
+
+	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+	LogWarning
+
+	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+	LogInfo
+
+	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
+	LogDebug
+
+	// LogAuth is a special case of LogDebug; it tells a logger to also log the body of an authentication request and response.
+	// NOTE: this can disclose sensitive information; use with care.
+	LogAuth
+)
+
+const (
+	logNone    = "NONE"
+	logFatal   = "FATAL"
+	logPanic   = "PANIC"
+	logError   = "ERROR"
+	logWarning = "WARNING"
+	logInfo    = "INFO"
+	logDebug   = "DEBUG"
+	logAuth    = "AUTH"
+	logUnknown = "UNKNOWN"
+)
+
+// ParseLevel converts the specified string into the corresponding LevelType.
+func ParseLevel(s string) (lt LevelType, err error) {
+	switch strings.ToUpper(s) {
+	case logFatal:
+		lt = LogFatal
+	case logPanic:
+		lt = LogPanic
+	case logError:
+		lt = LogError
+	case logWarning:
+		lt = LogWarning
+	case logInfo:
+		lt = LogInfo
+	case logDebug:
+		lt = LogDebug
+	case logAuth:
+		lt = LogAuth
+	default:
+		err = fmt.Errorf("bad log level '%s'", s)
+	}
+	return
+}
+
+// String implements the stringer interface for LevelType.
+func (lt LevelType) String() string {
+	switch lt {
+	case LogNone:
+		return logNone
+	case LogFatal:
+		return logFatal
+	case LogPanic:
+		return logPanic
+	case LogError:
+		return logError
+	case LogWarning:
+		return logWarning
+	case LogInfo:
+		return logInfo
+	case LogDebug:
+		return logDebug
+	case LogAuth:
+		return logAuth
+	default:
+		return logUnknown
+	}
+}
+
+// Filter defines functions for filtering HTTP request/response content.
+type Filter struct {
+	// URL returns a potentially modified string representation of a request URL.
+	URL func(u *url.URL) string
+
+	// Header returns a potentially modified set of values for the specified key.
+	// To completely exclude the header key/values return false.
+	Header func(key string, val []string) (bool, []string)
+
+	// Body returns a potentially modified request/response body.
+	Body func(b []byte) []byte
+}
+
+func (f Filter) processURL(u *url.URL) string {
+	if f.URL == nil {
+		return u.String()
+	}
+	return f.URL(u)
+}
+
+func (f Filter) processHeader(k string, val []string) (bool, []string) {
+	if f.Header == nil {
+		return true, val
+	}
+	return f.Header(k, val)
+}
+
+func (f Filter) processBody(b []byte) []byte {
+	if f.Body == nil {
+		return b
+	}
+	return f.Body(b)
+}
+
+// Writer defines methods for writing to a logging facility.
+type Writer interface {
+	// Writeln writes the specified message with the standard log entry header and new-line character.
+	Writeln(level LevelType, message string)
+
+	// Writef writes the specified format specifier with the standard log entry header and no new-line character.
+	Writef(level LevelType, format string, a ...interface{})
+
+	// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
+	// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
+	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
+	// By default no request content is excluded.
+	WriteRequest(req *http.Request, filter Filter)
+
+	// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
+	// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
+	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
+	// By default no response content is excluded.
+	WriteResponse(resp *http.Response, filter Filter)
+}
+
+// Instance is the default log writer initialized during package init.
+// This can be replaced with a custom implementation as required.
+var Instance Writer
+
+// default log level
+var logLevel = LogNone
+
+// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL.
+// If no value was specified the default value is LogNone.
+// Custom loggers can call this to retrieve the configured log level.
+func Level() LevelType {
+	return logLevel
+}
+
+func init() {
+	// separated for testing purposes
+	initDefaultLogger()
+}
+
+func initDefaultLogger() {
+	// init with nilLogger so callers don't have to do a nil check on Instance
+	Instance = nilLogger{}
+	llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
+	if llStr == "" {
+		return
+	}
+	var err error
+	logLevel, err = ParseLevel(llStr)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
+		return
+	}
+	if logLevel == LogNone {
+		return
+	}
+	// default to stderr
+	dest := os.Stderr
+	lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
+	if strings.EqualFold(lfStr, "stdout") {
+		dest = os.Stdout
+	} else if lfStr != "" {
+		lf, err := os.Create(lfStr)
+		if err == nil {
+			dest = lf
+		} else {
+			fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error())
+		}
+	}
+	Instance = fileLogger{
+		logLevel: logLevel,
+		mu:       &sync.Mutex{},
+		logFile:  dest,
+	}
+}
+
+// the nil logger does nothing
+type nilLogger struct{}
+
+func (nilLogger) Writeln(LevelType, string) {}
+
+func (nilLogger) Writef(LevelType, string, ...interface{}) {}
+
+func (nilLogger) WriteRequest(*http.Request, Filter) {}
+
+func (nilLogger) WriteResponse(*http.Response, Filter) {}
+
+// An os.File is used instead of a log.Logger so the stream can be flushed after every write.
+type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 0000000..b9d6a27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
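
The tracing shim vendored next exposes a single package-level registration hook. The following is an illustrative sketch, not part of the patch; it uses only the Tracer interface and the Register/StartSpan/EndSpan/IsEnabled hooks defined in tracing.go below, while the noopTracer type is invented here for demonstration.

    package main

    import (
    	"context"
    	"net/http"

    	"github.com/Azure/go-autorest/tracing"
    )

    // noopTracer satisfies tracing.Tracer without recording anything.
    type noopTracer struct{}

    func (noopTracer) NewTransport(base *http.Transport) http.RoundTripper       { return base }
    func (noopTracer) StartSpan(ctx context.Context, name string) context.Context { return ctx }
    func (noopTracer) EndSpan(ctx context.Context, httpStatusCode int, err error) {}

    func main() {
    	tracing.Register(noopTracer{})
    	ctx := tracing.StartSpan(context.Background(), "example.op")
    	// ... perform the traced work here ...
    	tracing.EndSpan(ctx, http.StatusOK, nil)
    	_ = tracing.IsEnabled() // true once a Tracer is registered
    }
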
diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 0000000..a2cdec7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,5 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 + +require github.com/Azure/go-autorest v14.2.0+incompatible diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 0000000..1fc56a9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,2 @@ +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 0000000..e163975 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 0000000..0e7a6e9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. 
+func IsEnabled() bool {
+	return tracer != nil
+}
+
+// NewTransport creates a new instrumenting http.RoundTripper for the
+// registered Tracer. If no Tracer has been registered it returns nil.
+func NewTransport(base *http.Transport) http.RoundTripper {
+	if tracer != nil {
+		return tracer.NewTransport(base)
+	}
+	return nil
+}
+
+// StartSpan starts a trace span with the specified name, associating it with the
+// provided context. Has no effect if a Tracer has not been registered.
+func StartSpan(ctx context.Context, name string) context.Context {
+	if tracer != nil {
+		return tracer.StartSpan(ctx, name)
+	}
+	return ctx
+}
+
+// EndSpan ends a previously started span stored in the context.
+// Has no effect if a Tracer has not been registered.
+func EndSpan(ctx context.Context, httpStatusCode int, err error) {
+	if tracer != nil {
+		tracer.EndSpan(ctx, httpStatusCode, err)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 03334d6..74f35cc 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -88,10 +88,6 @@ func (c *Client) NewRequest(operation *request.Operation, params interface{}, da
 // AddDebugHandlers injects debug logging handlers into the service to log request
 // debug information.
 func (c *Client) AddDebugHandlers() {
-	if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
-		return
-	}
-
 	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
 	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
index 8958c32..1d774cf 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -53,6 +53,10 @@ var LogHTTPRequestHandler = request.NamedHandler{
 }
 
 func logRequest(r *request.Request) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
+
 	logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
 	bodySeekable := aws.IsReaderSeekable(r.Body)
 
@@ -120,6 +124,10 @@ var LogHTTPResponseHandler = request.NamedHandler{
 }
 
 func logResponse(r *request.Request) {
+	if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
+		return
+	}
+
 	lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
 
 	if r.HTTPResponse == nil {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index e42c5cd..260a37c 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -95,7 +95,7 @@ import (
 // StdinTokenProvider will prompt on stderr and read from stdin for a string value.
 // An error is returned if reading from stdin fails.
 //
-// Use this function go read MFA tokens from stdin. The function makes no attempt
+// Use this function to read MFA tokens from stdin. The function makes no attempt
 // to make atomic prompts from stdin across multiple goroutines.
// // Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index f472e55..5ac5691 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -21,6 +21,7 @@ const ( ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). @@ -121,6 +122,9 @@ var awsPartition = partition{ "ap-northeast-2": region{ Description: "Asia Pacific (Seoul)", }, + "ap-northeast-3": region{ + Description: "Asia Pacific (Osaka)", + }, "ap-south-1": region{ Description: "Asia Pacific (Mumbai)", }, @@ -184,6 +188,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -239,6 +244,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -361,12 +367,13 @@ var awsPartition = partition{ "amplifybackend": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, - "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -452,6 +459,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-northeast-3": endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "ap-south-1": endpoint{ Hostname: "api.ecr.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -706,6 +719,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -730,6 +744,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -765,6 +780,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -915,6 +931,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -967,6 +984,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": 
endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -988,9 +1006,11 @@ var awsPartition = partition{ "batch": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1120,6 +1140,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1354,6 +1375,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1385,6 +1407,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1708,6 +1731,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1756,6 +1780,7 @@ var awsPartition = partition{ "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, @@ -1765,7 +1790,11 @@ var awsPartition = partition{ "contact-lens": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1902,6 +1931,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -1963,6 +1993,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2140,6 +2171,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2202,6 +2234,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2259,6 +2292,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2325,6 +2359,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2376,6 +2411,7 @@ var awsPartition = partition{ 
"ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2425,6 +2461,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2456,6 +2493,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2505,6 +2543,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2539,6 +2578,12 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "fips-ap-northeast-3": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "fips-ap-south-1": endpoint{ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2652,6 +2697,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2704,6 +2750,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2783,9 +2830,19 @@ var awsPartition = partition{ "emr-containers": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "entitlement.marketplace": service{ @@ -2805,6 +2862,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2836,6 +2894,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -2885,6 +2944,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3156,6 +3216,8 @@ var awsPartition = partition{ "gamelift": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3163,8 +3225,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, 
"ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -3181,6 +3247,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3236,6 +3303,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3302,8 +3370,15 @@ var awsPartition = partition{ Endpoints: endpoints{ "af-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, "fips-us-east-2": endpoint{ Hostname: "groundstation-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ @@ -3317,6 +3392,7 @@ var awsPartition = partition{ }, }, "me-south-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, @@ -3331,6 +3407,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3696,6 +3773,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3789,6 +3867,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3847,11 +3926,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "lambda": service{ @@ -3861,6 +3941,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3910,6 +3991,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -3977,6 +4059,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4019,6 +4102,14 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "lookoutequipment": service{ + + Endpoints: endpoints{ + "ap-northeast-2": 
endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, "lookoutvision": service{ Endpoints: endpoints{ @@ -4064,6 +4155,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4276,6 +4368,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4350,6 +4443,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4398,6 +4492,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -4754,6 +4849,22 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "personalize": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "pinpoint": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -4957,12 +5068,42 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "rds": service{ @@ -4972,6 +5113,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5029,6 +5171,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5132,6 +5275,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": 
endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5327,6 +5471,90 @@ var awsPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-af-south-1": endpoint{ + Hostname: "s3-accesspoint.af-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-east-1": endpoint{ + Hostname: "s3-accesspoint.ap-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-northeast-3": endpoint{ + Hostname: "s3-accesspoint.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-south-1": endpoint{ + Hostname: "s3-accesspoint.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-1": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ap-southeast-2": endpoint{ + Hostname: "s3-accesspoint.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-central-1": endpoint{ + Hostname: "s3-accesspoint.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-north-1": endpoint{ + Hostname: "s3-accesspoint.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-south-1": endpoint{ + Hostname: "s3-accesspoint.eu-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-1": endpoint{ + Hostname: "s3-accesspoint.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-2": endpoint{ + Hostname: "s3-accesspoint.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-eu-west-3": endpoint{ + Hostname: "s3-accesspoint.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-me-south-1": endpoint{ + Hostname: "s3-accesspoint.me-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-sa-east-1": endpoint{ + Hostname: "s3-accesspoint.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ @@ -5334,6 +5562,7 @@ var awsPartition = partition{ SignatureVersions: []string{"s3", "s3v4"}, }, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{ Hostname: "s3.ap-southeast-1.amazonaws.com", @@ -5358,8 +5587,28 @@ var 
awsPartition = partition{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, }, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-accesspoint-ca-central-1": endpoint{ + Hostname: "s3-accesspoint-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-east-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-west-2": endpoint{ + Hostname: "s3-accesspoint-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "me-south-1": endpoint{}, "s3-external-1": endpoint{ Hostname: "s3-external-1.amazonaws.com", @@ -5410,6 +5659,13 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-northeast-3": endpoint{ + Hostname: "s3-control.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, "ap-south-1": endpoint{ Hostname: "s3-control.ap-south-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, @@ -5605,6 +5861,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5654,6 +5911,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5846,6 +6104,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -5956,6 +6215,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6078,6 +6338,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6130,6 +6391,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6166,12 +6428,10 @@ var awsPartition = partition{ }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "ssm": service{ @@ -6181,6 +6441,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, 
"ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6236,6 +6497,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6381,6 +6643,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6448,6 +6711,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6497,6 +6761,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -6582,6 +6847,8 @@ var awsPartition = partition{ "transfer": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -6623,11 +6890,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "translate": service{ @@ -6973,6 +7241,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -7003,6 +7272,7 @@ var awsPartition = partition{ "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-northeast-3": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, @@ -7573,7 +7843,8 @@ var awscnPartition = partition{ "lakeformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, "lambda": service{ @@ -7617,6 +7888,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "mq": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "neptune": service{ Endpoints: endpoints{ @@ -7641,6 +7919,12 @@ var awscnPartition = partition{ }, }, }, + "personalize": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, "polly": service{ Endpoints: endpoints{ @@ -7704,6 +7988,14 @@ var awscnPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-cn-north-1": endpoint{ + Hostname: "s3-accesspoint.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-cn-northwest-1": endpoint{ + Hostname: "s3-accesspoint.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + }, "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, @@ -7997,6 +8289,27 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + 
Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "api.ecr": service{ Endpoints: endpoints{ @@ -8150,18 +8463,6 @@ var awsusgovPartition = partition{ "batch": service{ Endpoints: endpoints{ - "fips-us-gov-east-1": endpoint{ - Hostname: "batch.us-gov-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-east-1", - }, - }, - "fips-us-gov-west-1": endpoint{ - Hostname: "batch.us-gov-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-gov-west-1", - }, - }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -8686,6 +8987,27 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "fsx": service{ Endpoints: endpoints{ @@ -9126,8 +9448,18 @@ var awsusgovPartition = partition{ "ram": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "rds": service{ @@ -9259,6 +9591,22 @@ var awsusgovPartition = partition{ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ + "accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-east-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, + "fips-accesspoint-us-gov-west-1": endpoint{ + Hostname: "s3-accesspoint-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + }, "fips-us-gov-west-1": endpoint{ Hostname: "s3-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -9392,6 +9740,27 @@ var awsusgovPartition = partition{ }, }, }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -9911,6 +10280,12 @@ var 
awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "firehose": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "glacier": service{ Endpoints: endpoints{ @@ -10381,6 +10756,19 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "route53": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-iso-b-global": endpoint{ + Hostname: "route53.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, "s3": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index d71f7b3..1737c26 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -689,9 +689,12 @@ func (ctx *signingCtx) buildBodyDigest() error { if hash == "" { includeSHA256Header := ctx.unsignedPayload || ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda" || ctx.ServiceName == "glacier" - s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + s3Presign := ctx.isPresign && + (ctx.ServiceName == "s3" || + ctx.ServiceName == "s3-object-lambda") if ctx.unsignedPayload || s3Presign { hash = "UNSIGNED-PAYLOAD" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 9933b43..ed87158 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.37.11" +const SDKVersion = "1.38.21" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go index 7a8e46f..3079e4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go @@ -7,6 +7,21 @@ import ( "github.com/aws/aws-sdk-go/aws/arn" ) +var supportedServiceARN = []string{ + "s3", + "s3-outposts", + "s3-object-lambda", +} + +func isSupportedServiceARN(service string) bool { + for _, name := range supportedServiceARN { + if name == service { + return true + } + } + return false +} + // Resource provides the interfaces abstracting ARNs of specific resource // types. 
type Resource interface { @@ -29,9 +44,10 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err return nil, InvalidARNError{ARN: a, Reason: "partition not set"} } - if a.Service != "s3" && a.Service != "s3-outposts" { + if !isSupportedServiceARN(a.Service) { return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} } + if len(a.Resource) == 0 { return nil, InvalidARNError{ARN: a, Reason: "resource not set"} } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go new file mode 100644 index 0000000..513154c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go @@ -0,0 +1,15 @@ +package arn + +// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service +type S3ObjectLambdaARN interface { + Resource + + isS3ObjectLambdasARN() +} + +// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type +type S3ObjectLambdaAccessPointARN struct { + AccessPointARN +} + +func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 89a0a29..6d15bad 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/internal/s3shared/arn" "github.com/aws/aws-sdk-go/private/checksum" "github.com/aws/aws-sdk-go/private/protocol" @@ -67,7 +68,7 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // AbortMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation aborts a multipart upload. After a multipart upload is aborted, +// This action aborts a multipart upload. After a multipart upload is aborted, // no additional parts can be uploaded using that upload ID. The storage consumed // by any previously uploaded parts will be freed. However, if any part uploads // are currently in progress, those part uploads might or might not succeed. @@ -76,10 +77,10 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // // To verify that all parts have been removed, so you don't get charged for // the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) -// operation and ensure that the parts list is empty. +// action and ensure that the parts list is empty. // -// For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// For information about permissions required to use the multipart upload, see +// Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to AbortMultipartUpload: // @@ -175,10 +176,10 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // You first initiate the multipart upload and then upload all parts using the // UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // operation. 
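Stepping back to the ARN hunks above: the supported-service list grows from s3 and s3-outposts to include s3-object-lambda, which the new S3ObjectLambdaAccessPointARN type represents. A standalone sketch of the same membership check, using the SDK's public aws/arn parser and a hypothetical access point ARN:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

// Mirrors the vendored isSupportedServiceARN list: s3, s3-outposts, and
// (newly) s3-object-lambda are the services the S3 client accepts.
var supportedServiceARN = []string{"s3", "s3-outposts", "s3-object-lambda"}

func isSupportedServiceARN(service string) bool {
	for _, name := range supportedServiceARN {
		if name == service {
			return true
		}
	}
	return false
}

func main() {
	// Hypothetical access point ARN for the s3-object-lambda service.
	a, err := arn.Parse("arn:aws:s3-object-lambda:us-west-2:123456789012:accesspoint/my-ap")
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	fmt.Println(a.Service, isSupportedServiceARN(a.Service)) // s3-object-lambda true
}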
After successfully uploading all relevant parts of an upload, -// you call this operation to complete the upload. Upon receiving this request, +// you call this action to complete the upload. Upon receiving this request, // Amazon S3 concatenates all the parts in ascending order by part number to // create a new object. In the Complete Multipart Upload request, you must provide -// the parts list. You must ensure that the parts list is complete. This operation +// the parts list. You must ensure that the parts list is complete. This action // concatenates the parts that you provide in the list. For each part in the // list, you must provide the part number and the ETag value, returned after // that part was uploaded. @@ -199,7 +200,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information about permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // CompleteMultipartUpload has the following special errors: // @@ -306,10 +307,10 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Creates a copy of an object that is already stored in Amazon S3. // // You can store individual objects of up to 5 TB in Amazon S3. You create a -// copy of your object up to 5 GB in size in a single atomic operation using -// this API. However, to copy an object greater than 5 GB, you must use the -// multipart upload Upload Part - Copy API. For more information, see Copy Object -// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy API. For more information, see Copy Object Using +// the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // // All copy requests must be authenticated. Additionally, you must have read // access to the source object and write access to the destination bucket. For @@ -319,7 +320,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // A copy request might return an error when Amazon S3 receives the copy request // or while Amazon S3 is copying the files. If the error occurs before the copy -// operation starts, you receive a standard Amazon S3 error. If the error occurs +// action starts, you receive a standard Amazon S3 error. If the error occurs // during the copy operation, the error response is embedded in the 200 OK response. // This means that a 200 OK response can contain either a success or an error. // Design your application to parse the contents of the response and handle @@ -334,7 +335,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // The copy request charge is based on the storage class and Region that you // specify for the destination object. For pricing information, see Amazon S3 -// pricing (https://aws.amazon.com/s3/pricing/). +// pricing (http://aws.amazon.com/s3/pricing/). // // Amazon S3 transfer acceleration does not support cross-Region copies. 
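As a concrete illustration of the single-call copy described above (objects up to 5 GB), a hedged sketch with hypothetical bucket and key names:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// One atomic call copies objects up to 5 GB; larger objects require the
	// multipart Upload Part - Copy API, as the doc text notes.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("dest-bucket"),           // hypothetical
		Key:        aws.String("backup/report.csv"),     // hypothetical
		CopySource: aws.String("src-bucket/report.csv"), // "bucket/key" form
	})
	if err != nil {
		fmt.Println("copy:", err)
	}
}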
If // you request a cross-Region copy using a transfer acceleration endpoint, you @@ -404,7 +405,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the // object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -418,7 +419,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Storage Class Options // -// You can use the CopyObject operation to change the storage class of an object +// You can use the CopyObject action to change the storage class of an object // that is already stored in Amazon S3 using the StorageClass parameter. For // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 Service Developer Guide. @@ -459,8 +460,8 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // Returned Error Codes: // * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" -// The source object of the COPY operation is not in the active tier and is -// only stored in Amazon S3 Glacier. +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { @@ -678,10 +679,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // CreateMultipartUpload API operation for Amazon Simple Storage Service. // -// This operation initiates a multipart upload and returns an upload ID. This -// upload ID is used to associate all of the parts in the specific multipart -// upload. You specify this upload ID in each of your subsequent upload part -// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). // You also include this upload ID in the final request to either complete or // abort the multipart upload request. // @@ -691,12 +692,12 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // If you have configured a lifecycle rule to abort incomplete multipart uploads, // the upload must complete within the number of days specified in the bucket // lifecycle configuration. Otherwise, the incomplete multipart upload becomes -// eligible for an abort operation and Amazon S3 aborts the multipart upload. -// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// eligible for an abort action and Amazon S3 aborts the multipart upload. For +// more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). 
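The initiate, upload parts, then complete-or-abort flow described above is what the SDK's bundled s3manager.Uploader performs on the caller's behalf for large bodies; a sketch, with hypothetical file and bucket names:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	f, err := os.Open("large-backup.tar") // hypothetical local file
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	// Uploader initiates the multipart upload, sends parts concurrently, and
	// completes the upload, aborting it if a part fails.
	up := s3manager.NewUploader(session.Must(session.NewSession()))
	_, err = up.Upload(&s3manager.UploadInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		Key:    aws.String("large-backup.tar"),
		Body:   f,
	})
	if err != nil {
		fmt.Println("upload:", err)
	}
}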
// // For information about the permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // For request signing, multipart upload is just a series of regular requests. // You initiate a multipart upload, send one or more requests to upload parts, @@ -716,7 +717,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // and decrypts it when you access it. You can provide your own encryption key, // or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or // Amazon S3-managed encryption keys. If you choose to provide your own encryption -// key, the request headers you provide in UploadPart (AmazonS3/latest/API/API_UploadPart.html) +// key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) // requests must match the headers you used in the request to initiate the upload // by using CreateMultipartUpload. @@ -989,8 +990,8 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). @@ -1083,7 +1084,7 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // permission to others. // // For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Related Resources: // @@ -1164,17 +1165,17 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // DeleteBucketEncryption API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE operation removes default encryption from +// This implementation of the DELETE action removes default encryption from // the bucket. For information about the Amazon S3 default encryption feature, // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
// // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Related Resources // @@ -1361,8 +1362,8 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). @@ -1550,8 +1551,8 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about CloudWatch request metrics for Amazon S3, see Monitoring // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
@@ -1725,9 +1726,9 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // DeleteBucketPolicy API operation for Amazon Simple Storage Service. // -// This implementation of the DELETE operation uses the policy subresource to -// delete the policy of a specified bucket. If you are using an identity other -// than the root user of the AWS account that owns the bucket, the calling identity +// This implementation of the DELETE action uses the policy subresource to delete +// the policy of a specified bucket. If you are using an identity other than +// the root user of the AWS account that owns the bucket, the calling identity // must have the DeleteBucketPolicy permissions on the specified bucket and // belong to the bucket owner's account to use this operation. // @@ -1827,8 +1828,8 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration // action. The bucket owner has these permissions by default and can grant it // to others. For more information about permissions, see Permissions Related -// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // It can take a while for the deletion of a replication configuration to fully // propagate. @@ -2000,15 +2001,15 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // DeleteBucketWebsite API operation for Amazon Simple Storage Service. // -// This operation removes the website configuration for a bucket. Amazon S3 -// returns a 200 OK response upon successfully deleting a website configuration -// on the specified bucket. You will get a 200 OK response if the website configuration +// This action removes the website configuration for a bucket. Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration // you are trying to delete does not exist on the bucket. Amazon S3 returns // a 404 response if the bucket specified in the request does not exist. // -// This DELETE operation requires the S3:DeleteBucketWebsite permission. By -// default, only the bucket owner can delete the website configuration attached -// to a bucket. However, bucket owners can grant other users permission to delete +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete // the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite // permission. // @@ -2095,7 +2096,8 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // // Removes the null version (if there is one) of an object and inserts a delete // marker, which becomes the latest version of the object. 
If there isn't a -// null version, Amazon S3 does not remove any objects. +// null version, Amazon S3 does not remove any objects but will still respond +// that the command was successful. // // To remove a specific version, you must be the bucket owner and you must use // the version Id subresource. Using this subresource permanently deletes the @@ -2110,14 +2112,14 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html). // To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). // -// You can delete objects by explicitly calling the DELETE Object API or configure -// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// You can delete objects by explicitly calling DELETE Object or configure its +// lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) // to enable Amazon S3 to remove them for you. If you want to block users or // accounts from removing or deleting objects from your bucket, you must deny // them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration // actions. // -// The following operation is related to DeleteObject: +// The following action is related to DeleteObject: // // * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // @@ -2285,27 +2287,27 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // DeleteObjects API operation for Amazon Simple Storage Service. // -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. If you know the object keys that you want to delete, -// then this operation provides a suitable alternative to sending individual -// delete requests, reducing per-request overhead. +// This action enables you to delete multiple objects from a bucket using a +// single HTTP request. If you know the object keys that you want to delete, +// then this action provides a suitable alternative to sending individual delete +// requests, reducing per-request overhead. // // The request contains a list of up to 1000 keys that you want to delete. In // the XML, you provide the object key names, and optionally, version IDs if // you want to delete a specific version of the object from a versioning-enabled -// bucket. For each key, Amazon S3 performs a delete operation and returns the +// bucket. For each key, Amazon S3 performs a delete action and returns the // result of that delete, success, or failure, in the response. Note that if // the object specified in the request is not found, Amazon S3 returns the result // as deleted. // -// The operation supports two modes for the response: verbose and quiet. By -// default, the operation uses verbose mode in which the response includes the -// result of deletion of each key in your request. In quiet mode the response -// includes only keys where the delete operation encountered an error. For a -// successful deletion, the operation does not return any information about -// the delete in the response body. +// The action supports two modes for the response: verbose and quiet. By default, +// the action uses verbose mode in which the response includes the result of +// deletion of each key in your request. 
In quiet mode the response includes +// only keys where the delete action encountered an error. For a successful +// deletion, the action does not return any information about the delete in +// the response body. // -// When performing this operation on an MFA Delete enabled bucket, that attempts +// When performing this action on an MFA Delete enabled bucket, that attempts // to delete any versioned objects, you must include an MFA token. If you do // not provide one, the entire request will fail, even if there are non-versioned // objects you are trying to delete. If you provide an invalid token, whether @@ -2404,8 +2406,8 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) // Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use // this operation, you must have the s3:PutBucketPublicAccessBlock permission. // For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The following operations are related to DeletePublicAccessBlock: // @@ -2489,17 +2491,17 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the accelerate subresource -// to return the Transfer Acceleration state of a bucket, which is either Enabled +// This implementation of the GET action uses the accelerate subresource to +// return the Transfer Acceleration state of a bucket, which is either Enabled // or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that // enables you to perform faster data transfers to and from Amazon S3. // // To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. 
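A quiet-mode batch delete of the kind the DeleteObjects text above describes; bucket and keys are hypothetical:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	// Quiet mode: the response reports only keys whose delete failed.
	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-bucket"), // hypothetical
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("tmp/a.txt")},
				{Key: aws.String("tmp/b.txt")},
			},
			Quiet: aws.Bool(true),
		},
	})
	if err != nil {
		fmt.Println("delete:", err)
		return
	}
	fmt.Println("failed deletes:", len(out.Errors))
}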
// // You set the Transfer Acceleration state of an existing bucket to Enabled // or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) @@ -2511,7 +2513,7 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // For more information about transfer acceleration, see Transfer Acceleration // (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Related Resources // @@ -2589,7 +2591,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // GetBucketAcl API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation uses the acl subresource to return +// This implementation of the GET action uses the acl subresource to return // the access control list (ACL) of a bucket. To use GET to return the ACL of // the bucket, you must have READ_ACP access to the bucket. If READ_ACP permission // is granted to the anonymous user, you can return the ACL of the bucket without @@ -2671,19 +2673,19 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the GET operation returns an analytics configuration +// This implementation of the GET action returns an analytics configuration // (identified by the analytics configuration ID) from the bucket. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // For information about Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Related Resources // @@ -2852,15 +2854,18 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // GetBucketEncryption API operation for Amazon Simple Storage Service. // -// Returns the default encryption configuration for an Amazon S3 bucket. For -// information about the Amazon S3 default encryption feature, see Amazon S3 -// Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// Returns the default encryption configuration for an Amazon S3 bucket. 
If +// the bucket does not have a default encryption configuration, GetBucketEncryption +// returns ServerSideEncryptionConfigurationNotFoundError. +// +// For information about the Amazon S3 default encryption feature, see Amazon +// S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). // // To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The following operations are related to GetBucketEncryption: // @@ -3045,8 +3050,8 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). @@ -3148,8 +3153,8 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
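For illustration, a minimal aws-sdk-go sketch of the GetBucketEncryption behavior added above, distinguishing a bucket that has no default encryption configuration; the error-code string is taken from the doc text, and the bucket name and region are placeholder assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})) // assumed region
	svc := s3.New(sess)

	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket name
	})
	if err != nil {
		// Per the doc text above, a bucket with no default encryption configuration
		// returns ServerSideEncryptionConfigurationNotFoundError.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ServerSideEncryptionConfigurationNotFoundError" {
			fmt.Println("bucket has no default encryption configuration")
			return
		}
		log.Fatal(err)
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		fmt.Println(rule)
	}
}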
// // GetBucketLifecycle has the following special error: // @@ -3247,8 +3252,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // Accordingly, this section describes the latest API. The response describes // the new filter element that you can use to specify a filter to select a subset // of objects to which the rule applies. If you are using a previous version -// of the lifecycle configuration, it still works. For the earlier API description, -// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). +// of the lifecycle configuration, it still works. For the earlier action, see +// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). // // Returns the lifecycle configuration information set on the bucket. For information // about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). @@ -3256,8 +3261,8 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration // action. The bucket owner has this permission, by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // GetBucketLifecycleConfiguration has the following special error: // @@ -3516,8 +3521,8 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about CloudWatch request metrics for Amazon S3, see Monitoring // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). @@ -3689,8 +3694,8 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // Returns the notification configuration of a bucket. 
// -// If notifications are not enabled on the bucket, the operation returns an -// empty NotificationConfiguration element. +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. // // By default, you must be the bucket owner to read the notification configuration // of a bucket. However, the bucket owner can use a bucket policy to grant permission @@ -3701,7 +3706,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). // For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketNotification: +// The following action is related to GetBucketNotification: // // * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) // @@ -3879,7 +3884,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // For more information about bucket policies, see Using Bucket Policies and // User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // -// The following operation is related to GetBucketPolicy: +// The following action is related to GetBucketPolicy: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -4052,11 +4057,11 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // can return a wrong result. // // For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // -// This operation requires permissions for the s3:GetReplicationConfiguration -// action. For more information about permissions, see Using Bucket Policies -// and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// This action requires permissions for the s3:GetReplicationConfiguration action. +// For more information about permissions, see Using Bucket Policies and User +// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). // // If you include the Filter element in a replication configuration, you must // also include the DeleteMarkerReplication and Priority elements. The response @@ -4405,7 +4410,7 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // For more information about hosting websites, see Hosting Websites on Amazon // S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// This GET operation requires the S3:GetBucketWebsite permission. By default, +// This GET action requires the S3:GetBucketWebsite permission. By default, // only the bucket owner can read the bucket website configuration. However, // bucket owners can allow other users to read the website configuration by // writing a bucket policy granting them the S3:GetBucketWebsite permission. 
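For illustration, a minimal aws-sdk-go sketch of reading a bucket website configuration under the S3:GetBucketWebsite permission discussed above; the bucket name and region are placeholder assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})) // assumed region
	svc := s3.New(sess)

	// Requires S3:GetBucketWebsite; by default only the bucket owner may read it.
	out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket name
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.IndexDocument != nil {
		fmt.Println("index document:", aws.StringValue(out.IndexDocument.Suffix))
	}
}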
@@ -4515,7 +4520,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering // Deep Archive tiers, before you can retrieve the object you must first restore // a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). -// Otherwise, this operation returns an InvalidObjectStateError error. For information +// Otherwise, this action returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // // Encryption request headers, like x-amz-server-side-encryption, should not @@ -4558,8 +4563,8 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // Versioning // -// By default, the GET operation returns the current version of an object. To -// return a different version, use the versionId subresource. +// By default, the GET action returns the current version of an object. To return +// a different version, use the versionId subresource. // // If the current version of the object is a delete marker, Amazon S3 behaves // as if the object was deleted and includes x-amz-delete-marker: true in the @@ -5026,7 +5031,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // subresource associated with the object. // // To use this operation, you must have permission to perform the s3:GetObjectTagging -// action. By default, the GET operation returns information about current version +// action. By default, the GET action returns information about current version // of an object. For a versioned bucket, you can have multiple versions of an // object in your bucket. To retrieve tags of any other version, use the versionId // query parameter. You also need permission for the s3:GetObjectVersionTagging @@ -5038,10 +5043,12 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // For information about the Amazon S3 object tagging feature, see Object Tagging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). // -// The following operation is related to GetObjectTagging: +// The following action is related to GetObjectTagging: // // * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5126,7 +5133,7 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // // This action is not supported by Amazon S3 on Outposts. // -// The following operation is related to GetObjectTorrent: +// The following action is related to GetObjectTorrent: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5300,16 +5307,20 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou // HeadBucket API operation for Amazon Simple Storage Service. // -// This operation is useful to determine if a bucket exists and you have permission -// to access it. The operation returns a 200 OK if the bucket exists and you -// have permission to access it. 
Otherwise, the operation might return responses -// such as 404 Not Found and 403 Forbidden. +// This action is useful to determine if a bucket exists and you have permission +// to access it. The action returns a 200 OK if the bucket exists and you have +// permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 404 Not Found or 403 Forbidden code. A +// message body is not included, so you cannot determine the exception beyond +// these error codes. // // To use this operation, you must have permissions to perform the s3:ListBucket // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5388,13 +5399,15 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // HeadObject API operation for Amazon Simple Storage Service. // -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. +// The HEAD action retrieves metadata from an object without returning the object +// itself. This action is useful if you're only interested in an object's metadata. +// To use HEAD, you must have READ access to the object. // -// A HEAD request has the same options as a GET operation on an object. The -// response is identical to the GET response except that there is no response -// body. +// A HEAD request has the same options as a GET action on an object. The response +// is identical to the GET response except that there is no response body. Because +// of this, if the HEAD request generates an error, it returns a generic 404 +// Not Found or 403 Forbidden code. It is not possible to retrieve the exact +// exception beyond these error codes. // // If you encrypt an object by using server-side encryption with customer-provided // encryption keys (SSE-C) when you store the object in Amazon S3, then when @@ -5409,11 +5422,14 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). // -// Encryption request headers, like x-amz-server-side-encryption, should not -// be sent for GET requests if your object uses server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon S3–managed -// encryption keys (SSE-S3). 
If your object does use these types of keys, you’ll -// get an HTTP 400 BadRequest error. +// * Encryption request headers, like x-amz-server-side-encryption, should +// not be sent for GET requests if your object uses server-side encryption +// with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon +// S3–managed encryption keys (SSE-S3). If your object does use these types +// of keys, you’ll get an HTTP 400 BadRequest error. +// +// * The last modified property in this case is the creation date of the +// object. // // Request headers are limited to 8 KB in size. For more information, see Common // Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). @@ -5445,7 +5461,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // * If you don’t have the s3:ListBucket permission, Amazon S3 returns // an HTTP status code 403 ("access denied") error. // -// The following operation is related to HeadObject: +// The following action is related to HeadObject: // // * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // @@ -5527,19 +5543,19 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // Lists the analytics configurations for the bucket. You can have up to 1,000 // analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. You should always check the IsTruncated element -// in the response. If there are no more configurations to list, IsTruncated -// is set to false. If there are more configurations to list, IsTruncated is -// set to true, and there will be a value in NextContinuationToken. You use -// the NextContinuationToken value to continue the pagination of the list by -// passing the value in continuation-token in the request to GET the next page. +// This action supports list pagination and does not return more than 100 configurations +// at a time. You should always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there will be a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
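For illustration, a minimal aws-sdk-go sketch of the IsTruncated/NextContinuationToken pagination loop described above for ListBucketAnalyticsConfigurations; the bucket name and region are placeholder assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})) // assumed region
	svc := s3.New(sess)

	var token *string
	for {
		out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
			Bucket:            aws.String("my-bucket"), // placeholder bucket name
			ContinuationToken: token,
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, cfg := range out.AnalyticsConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id))
		}
		// Per the doc text: stop when IsTruncated is false; otherwise pass
		// NextContinuationToken back as continuation-token for the next page.
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		token = out.NextContinuationToken
	}
}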
// // For information about Amazon S3 analytics feature, see Amazon S3 Analytics // – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). @@ -5726,19 +5742,19 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // Returns a list of inventory configurations for the bucket. You can have up // to 1,000 analytics configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about the Amazon S3 inventory feature, see Amazon S3 Inventory // (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) @@ -5827,19 +5843,19 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // are only for the request metrics of the bucket and do not provide information // on daily storage metrics. You can have up to 1,000 configurations per bucket. // -// This operation supports list pagination and does not return more than 100 -// configurations at a time. Always check the IsTruncated element in the response. -// If there are no more configurations to list, IsTruncated is set to false. -// If there are more configurations to list, IsTruncated is set to true, and -// there is a value in NextContinuationToken. You use the NextContinuationToken -// value to continue the pagination of the list by passing the value in continuation-token +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. 
If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token // in the request to GET the next page. // // To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For more information about metrics configurations and CloudWatch request // metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). @@ -6004,11 +6020,11 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // ListMultipartUploads API operation for Amazon Simple Storage Service. // -// This operation lists in-progress multipart uploads. An in-progress multipart +// This action lists in-progress multipart uploads. An in-progress multipart // upload is a multipart upload that has been initiated using the Initiate Multipart // Upload request, but has not yet been completed or aborted. // -// This operation returns at most 1,000 multipart uploads in the response. 1,000 +// This action returns at most 1,000 multipart uploads in the response. 1,000 // multipart uploads is the maximum number of uploads a response can include, // which is also the default value. You can further limit the number of uploads // in a response by specifying the max-uploads parameter in the response. If @@ -6025,7 +6041,7 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to ListMultipartUploads: // @@ -6173,6 +6189,9 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // use request parameters as selection criteria to return metadata about a subset // of all the object versions. // +// To use this operation, you must have permissions to perform the s3:ListBucketVersions +// action. Be aware of the name difference. +// // A 200 OK response can contain valid or invalid XML. Make sure to design your // application to parse the contents of the response and handle it appropriately. 
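For illustration, a minimal aws-sdk-go sketch of listing object versions as described above, using the SDK's built-in paginator; it assumes the s3:ListBucketVersions permission noted in the new text, and the bucket name and region are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})) // assumed region
	svc := s3.New(sess)

	// Walk every version of every object; requires s3:ListBucketVersions.
	err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket name
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}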
// @@ -6326,8 +6345,8 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // to design your application to parse the contents of the response and handle // it appropriately. // -// This API has been revised. We recommend that you use the newer version, ListObjectsV2 -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), // when developing applications. For backward compatibility, Amazon S3 continues // to support ListObjects. // @@ -6482,18 +6501,19 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // the request parameters as selection criteria to return a subset of the objects // in a bucket. A 200 OK response can contain valid or invalid XML. Make sure // to design your application to parse the contents of the response and handle -// it appropriately. +// it appropriately. Objects are returned sorted in an ascending order of the +// respective key names in the list. // // To use this operation, you must have READ access to the bucket. // -// To use this operation in an AWS Identity and Access Management (IAM) policy, +// To use this action in an AWS Identity and Access Management (IAM) policy, // you must have permissions to perform the s3:ListBucket action. The bucket // owner has this permission by default and can grant this permission to others. // For more information about permissions, see Permissions Related to Bucket -// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // -// This section describes the latest revision of the API. We recommend that +// This section describes the latest revision of this action. We recommend that // you use this revised API for application development. For backward compatibility, // Amazon S3 continues to support the prior version of this API, ListObjects // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). @@ -6658,7 +6678,7 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). // // For information on permissions required to use the multipart upload API, -// see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). +// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to ListParts: // @@ -6804,8 +6824,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. 
For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The Transfer Acceleration state of a bucket can be set to one of the following // two values: @@ -6815,7 +6835,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // * Suspended – Disables accelerated data transfers to the bucket. // // The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) -// operation returns the transfer acceleration state of a bucket. +// action returns the transfer acceleration state of a bucket. // // After setting the Transfer Acceleration state of a bucket to Enabled, it // might take up to thirty minutes before the data transfer rates to the bucket @@ -7092,8 +7112,8 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // Special Errors // @@ -7227,7 +7247,7 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // For more information about CORS, go to Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. +// S3 User Guide. // // Related Resources // @@ -7314,7 +7334,7 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. // -// This operation uses the encryption subresource to configure default encryption +// This action uses the encryption subresource to configure default encryption // and Amazon S3 Bucket Key for an existing bucket. // // Default encryption for a bucket can use server-side encryption with Amazon @@ -7322,19 +7342,19 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // specify default encryption using SSE-KMS, you can also configure Amazon S3 // Bucket Key. 
For information about default encryption, see Amazon S3 default // bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. For more information -// about S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see +// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon S3 User Guide. // -// This operation requires AWS Signature Version 4. For more information, see -// Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). +// This action requires AWS Signature Version 4. For more information, see Authenticating +// Requests (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Related Resources // @@ -7415,7 +7435,8 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. // -// Puts a S3 Intelligent-Tiering configuration to the specified bucket. +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You +// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. // // The S3 Intelligent-Tiering storage class is designed to optimize storage // costs by automatically moving data to the most cost-effective storage access @@ -7442,6 +7463,22 @@ func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketInt // // * ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) // +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically +// move objects stored in the S3 Intelligent-Tiering storage class to the Archive +// Access or Deep Archive Access tier. +// +// Special Errors +// +// * HTTP 400 Bad Request Error Code: InvalidArgument Cause: Invalid Argument +// +// * HTTP 400 Bad Request Error Code: TooManyConfigurations Cause: You are +// attempting to create a new configuration but have already reached the +// 1,000-configuration limit. 
+// +// * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner +// of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration +// bucket permission to set the configuration on the bucket. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7515,9 +7552,9 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. // -// This implementation of the PUT operation adds an inventory configuration -// (identified by the inventory ID) to the bucket. You can have up to 1,000 -// inventory configurations per bucket. +// This implementation of the PUT action adds an inventory configuration (identified +// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations +// per bucket. // // Amazon S3 inventory generates inventories of the objects in the bucket on // a daily or weekly basis, and the results are published to a flat file. The @@ -7530,7 +7567,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // the inventory daily or weekly. You can also configure what object metadata // to include and whether to inventory all object versions or only current versions. // For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // You must create a bucket policy on the destination bucket to grant permissions // to Amazon S3 to write objects to the bucket in the defined location. For @@ -7540,9 +7577,9 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Special Errors // @@ -7654,7 +7691,7 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. For information about lifecycle configuration, see // Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
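For illustration, a minimal aws-sdk-go sketch of putting a bucket lifecycle configuration; it uses the newer PutBucketLifecycleConfiguration form, and the bucket name, rule ID, prefix, and expiration are placeholder assumptions:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})) // assumed region
	svc := s3.New(sess)

	// Expire objects under the "logs/" prefix after 365 days.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket name
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"), // placeholder rule ID
				Status:     aws.String("Enabled"),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}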
// // By default, all Amazon S3 resources, including buckets, objects, and related // subresources (for example, lifecycle configuration and website configuration) @@ -7675,8 +7712,8 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // * s3:PutLifecycleConfiguration // // For more information about permissions, see Managing Access Permissions to -// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // For more examples of transitioning objects to storage classes such as STANDARD_IA // or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). @@ -7692,9 +7729,9 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // * By default, a resource owner—in this case, a bucket owner, which is // the AWS account that created the bucket—can perform any of the operations. // A resource owner can also grant others permission to perform the operation. -// For more information, see the following topics in the Amazon Simple Storage -// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) +// For more information, see the following topics in the Amazon S3 User Guide: +// Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7779,7 +7816,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. For information about lifecycle configuration, see -// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // Bucket lifecycle configuration now supports specifying a lifecycle rule using // an object key name prefix, one or more object tags, or a combination of both. @@ -7831,7 +7868,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // * s3:PutLifecycleConfiguration // // For more information about permissions, see Managing Access Permissions to -// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // The following are related to PutBucketLifecycleConfiguration: // @@ -8048,8 +8085,8 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration // action. 
The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // For information about CloudWatch request metrics for Amazon S3, see Monitoring // Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). @@ -8245,8 +8282,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // // -// This operation replaces the existing notification configuration with the -// configuration you include in the request body. +// This action replaces the existing notification configuration with the configuration +// you include in the request body. // // After Amazon S3 receives this request, it first verifies that any Amazon // Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon @@ -8266,8 +8303,8 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // The PUT notification is an atomic operation. For example, suppose your notification // configuration includes SNS topic, SQS queue, and Lambda function configurations. // When you send a PUT request with this configuration, Amazon S3 sends test -// messages to your SNS topic. If the message fails, the entire PUT operation -// will fail, and Amazon S3 will not add the configuration to your bucket. +// messages to your SNS topic. If the message fails, the entire PUT action will +// fail, and Amazon S3 will not add the configuration to your bucket. // // Responses // @@ -8276,7 +8313,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // will also include the x-amz-sns-test-message-id header containing the message // ID of the test notification sent to the topic. // -// The following operation is related to PutBucketNotificationConfiguration: +// The following action is related to PutBucketNotificationConfiguration: // // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // @@ -8552,8 +8589,8 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon S3 Developer Guide. // -// To perform this operation, the user or role performing the operation must -// have the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) // permission. // // Specify the replication configuration in the request body. 
In the replication @@ -8583,7 +8620,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // bucket, can perform this operation. The resource owner can also grant others // permissions to perform the operation. For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // Handling Replication of Encrypted Objects // @@ -8786,8 +8823,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // To use this operation, you must have permissions to perform the s3:PutBucketTagging // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). // // PutBucketTagging has the following special errors: // @@ -8801,7 +8838,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // match the schema. // // * Error code: OperationAbortedError Description: A conflicting conditional -// operation is currently in progress against this resource. Please try again. +// action is currently in progress against this resource. Please try again. // // * Error code: InternalError Description: The service was unable to apply // the provided tag to the bucket. @@ -9008,7 +9045,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // document and any redirect rules. For more information, see Hosting Websites // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). // -// This PUT operation requires the S3:PutBucketWebsite permission. By default, +// This PUT action requires the S3:PutBucketWebsite permission. By default, // only the bucket owner can configure the website attached to a bucket; however, // bucket owners can allow other users to set the website configuration by writing // a bucket policy that grants them the S3:PutBucketWebsite permission. @@ -9067,7 +9104,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // Amazon S3 has a limitation of 50 routing rules per website configuration. // If you require more than 50 routing rules, you can use object redirect. For // more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9161,7 +9198,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // The Content-MD5 header is required for any request to upload an object with // a retention period configured using Amazon S3 Object Lock. For more information // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Server-side Encryption // @@ -9174,7 +9211,7 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // If you request server-side encryption using AWS Key Management Service (SSE-KMS), // you can enable an S3 Bucket Key at the object-level. For more information, // see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -9293,7 +9330,7 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // for a new or existing object in an S3 bucket. You must have WRITE_ACP permission // to set the ACL of an object. For more information, see What permissions can // I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // This action is not supported by Amazon S3 on Outposts. // @@ -9457,14 +9494,11 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // PutObjectLegalHold API operation for Amazon Simple Storage Service. // -// Applies a Legal Hold configuration to the specified object. +// Applies a Legal Hold configuration to the specified object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // This action is not supported by Amazon S3 on Outposts. // -// Related Resources -// -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9543,14 +9577,16 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration // // Places an Object Lock configuration on the specified bucket. The rule specified // in the Object Lock configuration will be applied by default to every new -// object placed in the specified bucket. +// object placed in the specified bucket. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // -// DefaultRetention requires either Days or Years. You can't specify both at -// the same time. +// * The DefaultRetention settings require both a mode and a period. // -// Related Resources +// * The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. // -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) +// * You can only enable Object Lock for new buckets. If you want to turn +// on Object Lock for an existing bucket, contact AWS Support. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9628,14 +9664,11 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // PutObjectRetention API operation for Amazon Simple Storage Service. // -// Places an Object Retention configuration on an object. +// Places an Object Retention configuration on an object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // // This action is not supported by Amazon S3 on Outposts. // -// Related Resources -// -// * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9741,7 +9774,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // * Code: MalformedXMLError Cause: The XML provided does not match the schema. // -// * Code: OperationAbortedError Cause: A conflicting conditional operation +// * Code: OperationAbortedError Cause: A conflicting conditional action // is currently in progress against this resource. Please try again. // // * Code: InternalError Cause: The service was unable to apply the provided @@ -9751,6 +9784,8 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // +// * DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9938,9 +9973,9 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // To use this operation, you must have permissions to perform the s3:RestoreObject // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) -// in the Amazon Simple Storage Service Developer Guide. +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. // // Querying Archives with Select Requests // @@ -9950,7 +9985,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // queries and custom analytics on your archived data without having to restore // your data to a hotter Amazon S3 tier. For an overview about select requests, // see Querying Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
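The select-request steps listed next are easier to follow against a concrete request, so here is a minimal SELECT-type restore sketch first; the names, query, and output prefix are illustrative, and svc is the client from the first sketch:

_, err := svc.RestoreObject(&s3.RestoreObjectInput{
	Bucket: aws.String("my-archive-bucket"),
	Key:    aws.String("archived/report.csv"),
	RestoreRequest: &s3.RestoreRequest{
		Type: aws.String(s3.RestoreRequestTypeSelect),
		SelectParameters: &s3.SelectParameters{
			Expression:          aws.String("SELECT s.name FROM S3Object s"),
			ExpressionType:      aws.String(s3.ExpressionTypeSql),
			InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)}},
			OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
		},
		// Query results are written as new objects under this prefix.
		OutputLocation: &s3.OutputLocation{S3: &s3.S3Location{
			BucketName: aws.String("my-archive-bucket"),
			Prefix:     aws.String("select-results/"),
		}},
	},
})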
// // When making a select request, do the following: // @@ -9961,13 +9996,12 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the storage class and encryption for the output objects stored in the // bucket. For more information about output, see Querying Archived Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) -// in the Amazon Simple Storage Service Developer Guide. For more information -// about the S3 structure in the request body, see the following: PutObject -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing -// Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) -// in the Amazon Simple Storage Service Developer Guide Protecting Data Using -// Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon S3 User Guide. For more information about the S3 structure +// in the request body, see the following: PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// in the Amazon S3 User Guide Protecting Data Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide // // * Define the SQL expression for the SELECT type of restoration for your // query in the request body's SelectParameters structure. You can use expressions @@ -9983,7 +10017,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about using SQL with S3 Glacier Select restore, see // SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // When making a select request, you can also do the following: // @@ -10054,19 +10088,19 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // You can use Amazon S3 restore speed upgrade to change the restore speed to // a faster speed while it is in progress. For more information, see Upgrading // the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // To get the status of object restoration, you can send a HEAD request. Operations // return the x-amz-restore header, which provides information about the restoration // status, in the response. You can use Amazon S3 event notifications to notify // you when a restore is initiated or completed. For more information, see Configuring // Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
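Restoring an entire archived object, rather than querying it, uses the same call with GlacierJobParameters choosing one of the retrieval tiers described above. A sketch with illustrative names:

_, err := svc.RestoreObject(&s3.RestoreObjectInput{
	Bucket: aws.String("my-archive-bucket"),
	Key:    aws.String("archived/report.csv"),
	RestoreRequest: &s3.RestoreRequest{
		Days: aws.Int64(2), // lifetime of the temporary copy
		GlacierJobParameters: &s3.GlacierJobParameters{
			Tier: aws.String(s3.TierStandard), // or TierExpedited / TierBulk
		},
	},
})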
// // After restoring an archived object, you can update the restoration period // by reissuing the request with a new period. Amazon S3 updates the restoration @@ -10081,11 +10115,11 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the object in 3 days. For more information about lifecycle configuration, // see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in Amazon Simple Storage Service Developer Guide. +// in Amazon S3 User Guide. // // Responses // -// A successful operation returns either the 200 OK or 202 Accepted status code. +// A successful action returns either the 200 OK or 202 Accepted status code. // // * If the object is not previously restored, then Amazon S3 returns 202 // Accepted in the response. @@ -10112,7 +10146,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // // * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide +// in the Amazon S3 User Guide // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10123,7 +10157,7 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Returned Error Codes: // * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier. +// This action is not allowed against this storage tier. // // See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { @@ -10200,7 +10234,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SelectObjectContent API operation for Amazon Simple Storage Service. // -// This operation filters the contents of an Amazon S3 object based on a simple +// This action filters the contents of an Amazon S3 object based on a simple // structured query language (SQL) statement. In the request, along with the // SQL expression, you must also specify a data serialization format (JSON, // CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse @@ -10212,18 +10246,18 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // For more information about using SQL with Amazon S3 Select, see SQL Reference // for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Permissions // // You must have s3:GetObject permission for this operation. Amazon S3 Select // does not support anonymous access. 
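Because the results come back as an event stream (see Working with the Response Body below), a sketch of issuing a query and draining the stream may help; the bucket, key, and query are illustrative, and the os package is assumed imported alongside the earlier ones:

out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
	Bucket:         aws.String("my-bucket"),
	Key:            aws.String("data.csv"),
	ExpressionType: aws.String(s3.ExpressionTypeSql),
	Expression:     aws.String("SELECT s.name FROM S3Object s WHERE s.city = 'Berlin'"),
	InputSerialization: &s3.InputSerialization{
		CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
	},
	OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
})
if err != nil {
	log.Fatal(err)
}
defer out.EventStream.Close()

// Rows arrive as RecordsEvent payloads; progress and statistics events
// can be ignored for a simple dump.
for ev := range out.EventStream.Events() {
	if rec, ok := ev.(*s3.RecordsEvent); ok {
		os.Stdout.Write(rec.Payload)
	}
}
if err := out.EventStream.Err(); err != nil {
	log.Fatal(err)
}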
For more information about permissions, // see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Object Data Formats // @@ -10246,13 +10280,13 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) -// in the Amazon Simple Storage Service Developer Guide. For objects that -// are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer -// master keys (CMKs) stored in AWS Key Management Service (SSE-KMS), server-side -// encryption is handled transparently, so you don't need to specify anything. -// For more information about server-side encryption, including SSE-S3 and -// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon +// S3 managed encryption keys (SSE-S3) and customer master keys (CMKs) stored +// in AWS Key Management Service (SSE-KMS), server-side encryption is handled +// transparently, so you don't need to specify anything. For more information +// about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting +// Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide. // // Working with the Response Body // @@ -10263,8 +10297,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // GetObject Support // -// The SelectObjectContent operation does not support the following GetObject -// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// The SelectObjectContent action does not support the following GetObject functionality. +// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // // * Range: Although you can specify a scan range for an Amazon S3 Select // request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) @@ -10274,7 +10308,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot // specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. // For more information, about storage classes see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Special Errors // @@ -10567,11 +10601,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // For more information on multipart uploads, go to Multipart Upload Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the -// Amazon Simple Storage Service Developer Guide . 
+// Amazon S3 User Guide . // // For information on the permissions required to use the multipart upload API, -// go to Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// go to Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. // // You can optionally request server-side encryption where Amazon S3 encrypts // your data as it writes it to disks in its data centers and decrypts it for @@ -10581,7 +10615,7 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // match the headers you used in the request to initiate the upload by using // CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Server-side encryption is supported by the S3 Multipart Upload actions. Unless // you are using a customer-provided encryption key, you don't need to specify @@ -10697,10 +10731,10 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // The minimum allowable part size for a multipart upload is 5 MB. For more // information about multipart upload limits, go to Quick Facts (https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // Instead of using an existing object as part data, you might use the UploadPart -// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) action // and provide data in your request. // // You must initiate a multipart upload before you can upload any part. In response @@ -10711,15 +10745,15 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // * For conceptual information about multipart uploads, see Uploading Objects // Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. // // * For information about permissions required to use the multipart upload -// API, see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) -// in the Amazon Simple Storage Service Developer Guide. +// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. // -// * For information about copying objects using a single atomic operation -// vs. the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) -// in the Amazon Simple Storage Service Developer Guide. +// * For information about copying objects using a single atomic action vs. +// the multipart upload, see Operations on Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) +// in the Amazon S3 User Guide. 
// // * For information about using server-side encryption with customer-provided // encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) @@ -10808,11 +10842,128 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp return out, req.Send() } +const opWriteGetObjectResponse = "WriteGetObjectResponse" + +// WriteGetObjectResponseRequest generates a "aws/request.Request" representing the +// client's request for the WriteGetObjectResponse operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See WriteGetObjectResponse for more information on using the WriteGetObjectResponse +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the WriteGetObjectResponseRequest method. +// req, resp := client.WriteGetObjectResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse +func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (req *request.Request, output *WriteGetObjectResponseOutput) { + op := &request.Operation{ + Name: opWriteGetObjectResponse, + HTTPMethod: "POST", + HTTPPath: "/WriteGetObjectResponse", + } + + if input == nil { + input = &WriteGetObjectResponseInput{} + } + + output = &WriteGetObjectResponseOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Sign.Remove(v4.SignRequestHandler) + handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload) + req.Handlers.Sign.PushFrontNamed(handler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{RequestRoute}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// WriteGetObjectResponse API operation for Amazon Simple Storage Service. +// +// Passes transformed objects to a GetObject operation when using Object Lambda +// Access Points. For information about Object Lambda Access Points, see Transforming +// objects with Object Lambda Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html) +// in the Amazon S3 User Guide. +// +// This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html), +// in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. +// The GetObject response metadata is supported so that the WriteGetObjectResponse +// caller, typically an AWS Lambda function, can provide the same metadata when +// it internally invokes GetObject. When WriteGetObjectResponse is called by +// a customer-owned Lambda function, the metadata returned to the end user GetObject +// call might differ from what Amazon S3 would normally return. 
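In the Object Lambda flow just described, the caller receives a route and a token in the invoking event and hands them back together with the body it wants the end user's GetObject call to see. A minimal sketch; route, token, and transformed are hypothetical values taken from that event, and the strings package is assumed imported:

func respond(svc *s3.S3, route, token, transformed string) error {
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route), // both values come from the event
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(200),
		// The body the end user's GetObject call will receive.
		Body: aws.ReadSeekCloser(strings.NewReader(transformed)),
	})
	return err
}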
+// +// AWS provides some prebuilt Lambda functions that you can use with S3 Object +// Lambda to detect and redact personally identifiable information (PII) and +// decompress S3 objects. These Lambda functions are available in the AWS Serverless +// Application Repository, and can be selected through the AWS Management Console +// when you create your Object Lambda Access Point. +// +// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, +// a natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically detects personally identifiable +// information (PII) such as names, addresses, dates, credit card numbers, and +// social security numbers from documents in your Amazon S3 bucket. +// +// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a +// natural language processing (NLP) service using machine learning to find +// insights and relationships in text. It automatically redacts personally identifiable +// information (PII) such as names, addresses, dates, credit card numbers, and +// social security numbers from documents in your Amazon S3 bucket. +// +// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, +// is equipped to decompress objects stored in S3 in one of six compressed file +// formats including bzip2, gzip, snappy, zlib, zstandard and ZIP. +// +// For information on how to view and use these functions, see Using AWS built +// Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html) +// in the Amazon S3 User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation WriteGetObjectResponse for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse +func (c *S3) WriteGetObjectResponse(input *WriteGetObjectResponseInput) (*WriteGetObjectResponseOutput, error) { + req, out := c.WriteGetObjectResponseRequest(input) + return out, req.Send() +} + +// WriteGetObjectResponseWithContext is the same as WriteGetObjectResponse with the addition of +// the ability to pass a context and additional request options. +// +// See WriteGetObjectResponse for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetObjectResponseInput, opts ...request.Option) (*WriteGetObjectResponseOutput, error) { + req, out := c.WriteGetObjectResponseRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // Specifies the days since the initiation of an incomplete multipart upload // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` @@ -10842,25 +10993,25 @@ type AbortMultipartUploadInput struct { // The bucket name to which the upload was taking place. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -11008,7 +11159,7 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart // Configures the transfer acceleration state for an Amazon S3 bucket. For more // information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type AccelerateConfiguration struct { _ struct{} `type:"structure"` @@ -11486,7 +11637,7 @@ func (s *Bucket) SetName(v string) *Bucket { // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. 
type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` @@ -11580,7 +11731,7 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon -// Simple Storage Service Developer Guide. +// S3 User Guide. type CORSConfiguration struct { _ struct{} `type:"structure"` @@ -11656,6 +11807,9 @@ type CORSRule struct { // object). ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + // The time in seconds that your browser is to cache the preflight response // for the specified resource. MaxAgeSeconds *int64 `type:"integer"` @@ -11711,6 +11865,12 @@ func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { return s } +// SetID sets the ID field's value. +func (s *CORSRule) SetID(v string) *CORSRule { + s.ID = &v + return s +} + // SetMaxAgeSeconds sets the MaxAgeSeconds field's value. func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { s.MaxAgeSeconds = &v @@ -11991,7 +12151,7 @@ type CompleteMultipartUploadInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -12127,20 +12287,20 @@ type CompleteMultipartUploadOutput struct { // The name of the bucket that contains the newly created object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. Bucket *string `type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side @@ -12341,6 +12501,10 @@ type Condition struct { // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals // is not specified. If both conditions are specified, both must be true for // the redirect to be applied. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). KeyPrefixEquals *string `type:"string"` } @@ -12409,20 +12573,20 @@ type CopyObjectInput struct { // The name of the destination bucket. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
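A concrete CopyObject call may make the Bucket and CopySource pairing clearer. A sketch with illustrative names, reusing the earlier svc client and assuming net/url is imported; note that CopySource must be URL-encoded:

_, err := svc.CopyObject(&s3.CopyObjectInput{
	Bucket: aws.String("destination-bucket"),
	Key:    aws.String("backup/report.csv"),
	// Source in bucket/key form; an access point ARN is also accepted,
	// as the field documentation describes.
	CopySource: aws.String(url.QueryEscape("source-bucket/report.csv")),
})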
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12432,8 +12596,8 @@ type CopyObjectInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a COPY operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a COPY action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Specifies caching behavior along the request/reply chain. @@ -12455,7 +12619,7 @@ type CopyObjectInput struct { // Specifies the source object for the copy operation. You specify the value // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): // // * For objects not accessed through an access point, specify the name of // the source bucket and the key of the source object, separated by a slash @@ -12513,12 +12677,12 @@ type CopyObjectInput struct { // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account id of the expected destination bucket owner. If the destination + // The account ID of the expected destination bucket owner. If the destination // bucket is owned by a different account, the request will fail with an HTTP // 403 (Access Denied) error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account id of the expected source bucket owner. If the source bucket + // The account ID of the expected source bucket owner. If the source bucket // is owned by a different account, the request will fail with an HTTP 403 (Access // Denied) error. ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` @@ -13083,10 +13247,10 @@ type CopyObjectResult struct { // Returns the ETag of the new object. The ETag reflects only changes to the // contents of an object, not its metadata. The source and destination ETag - // is identical for a successfully copied object. + // is identical for a successfully copied non-multipart object. ETag *string `type:"string"` - // Returns the date that the object was last modified. + // Creation date of the object. LastModified *time.Time `type:"timestamp"` } @@ -13326,20 +13490,20 @@ type CreateMultipartUploadInput struct { // The name of the bucket to which to initiate the upload // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13349,7 +13513,7 @@ type CreateMultipartUploadInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with an object operation doesn’t affect bucket-level + // Specifying this header with an object action doesn’t affect bucket-level // settings for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` @@ -13370,7 +13534,7 @@ type CreateMultipartUploadInput struct { // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -13447,7 +13611,7 @@ type CreateMultipartUploadInput struct { // object encryption. All GET and PUT requests for an object protected by AWS // KMS will fail if not made via SSL or using SigV4. For information about configuring // using any of the officially supported AWS SDKs and AWS CLI, see Specifying - // the Signature Version in Request Authentication (https://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 Developer Guide. SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` @@ -13740,20 +13904,20 @@ type CreateMultipartUploadOutput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. Bucket *string `locationName:"Bucket" type:"string"` // Indicates whether the multipart upload uses an S3 Bucket Key for server-side @@ -13886,17 +14050,24 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo // The container element for specifying the default Object Lock retention settings // for new objects placed in the specified bucket. +// +// * The DefaultRetention settings require both a mode and a period. +// +// * The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. type DefaultRetention struct { _ struct{} `type:"structure"` // The number of days that you want to specify for the default retention period. + // Must be used with Mode. Days *int64 `type:"integer"` // The default Object Lock retention mode you want to apply to new objects placed - // in the specified bucket. + // in the specified bucket. Must be used with either Days or Years. Mode *string `type:"string" enum:"ObjectLockRetentionMode"` // The number of years that you want to specify for the default retention period. + // Must be used with Mode. Years *int64 `type:"integer"` } @@ -13995,7 +14166,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14109,7 +14280,7 @@ type DeleteBucketCorsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14210,7 +14381,7 @@ type DeleteBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14310,7 +14481,7 @@ type DeleteBucketInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14500,7 +14671,7 @@ type DeleteBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14614,7 +14785,7 @@ type DeleteBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14714,7 +14885,7 @@ type DeleteBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14842,7 +15013,7 @@ type DeleteBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. 
If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -14942,7 +15113,7 @@ type DeleteBucketPolicyInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15042,7 +15213,7 @@ type DeleteBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15142,7 +15313,7 @@ type DeleteBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15242,7 +15413,7 @@ type DeleteBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15438,20 +15609,20 @@ type DeleteObjectInput struct { // The bucket name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15460,7 +15631,7 @@ type DeleteObjectInput struct { // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15644,30 +15815,31 @@ type DeleteObjectTaggingInput struct { // The bucket name containing the objects from which to remove the tags. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. 
For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Name of the object key. + // The key that identifies the object in the bucket from which to remove all + // tags. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -15794,20 +15966,20 @@ type DeleteObjectsInput struct { // The bucket name containing the objects to delete. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. 
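The Delete container documented below batches several keys into one request. A sketch with illustrative names; per-object failures surface in the response's Errors list rather than as a request error:

out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
	Bucket: aws.String("my-bucket"),
	Delete: &s3.Delete{
		Objects: []*s3.ObjectIdentifier{
			{Key: aws.String("logs/2021-04-17.gz")},
			{Key: aws.String("logs/2021-04-18.gz")},
		},
		Quiet: aws.Bool(false), // report successes as well as failures
	},
})
if err != nil {
	log.Fatal(err)
}
for _, e := range out.Errors {
	log.Printf("delete %s failed: %s", aws.StringValue(e.Key), aws.StringValue(e.Message))
}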
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15822,7 +15994,7 @@ type DeleteObjectsInput struct { // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -15952,7 +16124,7 @@ type DeleteObjectsOutput struct { // was successfully deleted. Deleted []*DeletedObject `type:"list" flattened:"true"` - // Container for a failed delete operation that describes the object that Amazon + // Container for a failed delete action that describes the object that Amazon // S3 attempted to delete and the error it encountered. Errors []*Error `locationName:"Error" type:"list" flattened:"true"` @@ -15997,7 +16169,7 @@ type DeletePublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -16423,9 +16595,9 @@ type Error struct { // Forbidden SOAP Fault Code Prefix: Client // // * Code: AccountProblem Description: There is a problem with your AWS account - // that prevents the operation from completing successfully. Contact AWS - // Support for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault - // Code Prefix: Client + // that prevents the action from completing successfully. Contact AWS Support + // for further assistance. HTTP Status Code: 403 Forbidden SOAP Fault Code + // Prefix: Client // // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource // has been disabled. Contact AWS Support for further assistance. HTTP Status @@ -16528,9 +16700,9 @@ type Error struct { // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client // - // * Code: InvalidObjectState Description: The operation is not valid for - // the current state of the object. HTTP Status Code: 403 Forbidden SOAP - // Fault Code Prefix: Client + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client // // * Code: InvalidPart Description: One or more of the specified parts could // not be found. The part might not have been uploaded, or the specified @@ -16695,7 +16867,7 @@ type Error struct { // can sign up at the following URL: https://aws.amazon.com/s3 HTTP Status // Code: 403 Forbidden SOAP Fault Code Prefix: Client // - // * Code: OperationAborted Description: A conflicting conditional operation + // * Code: OperationAborted Description: A conflicting conditional action // is currently in progress against this resource. Try again. 
HTTP Status // Code: 409 Conflict SOAP Fault Code Prefix: Client // @@ -16821,6 +16993,10 @@ type ErrorDocument struct { // The object key name to use when a 4XX class error occurs. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Key is a required field Key *string `min:"1" type:"string" required:"true"` } @@ -16905,7 +17081,7 @@ type FilterRule struct { // the filtering rule applies. The maximum length is 1,024 characters. Overlapping // prefixes and suffixes are not supported. For more information, see Configuring // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Name *string `type:"string" enum:"FilterRuleName"` // The value that the filter searches for in object key names. @@ -16942,7 +17118,7 @@ type GetBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17051,7 +17227,7 @@ type GetBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17169,7 +17345,7 @@ type GetBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17292,7 +17468,7 @@ type GetBucketCorsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17403,7 +17579,7 @@ type GetBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. 
If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17625,7 +17801,7 @@ type GetBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17748,7 +17924,7 @@ type GetBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17857,7 +18033,7 @@ type GetBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -17966,7 +18142,7 @@ type GetBucketLocationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18077,7 +18253,7 @@ type GetBucketLoggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18189,7 +18365,7 @@ type GetBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18312,7 +18488,7 @@ type GetBucketNotificationConfigurationRequest struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18398,7 +18574,7 @@ type GetBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18508,7 +18684,7 @@ type GetBucketPolicyInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18617,7 +18793,7 @@ type GetBucketPolicyStatusInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18726,7 +18902,7 @@ type GetBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18836,7 +19012,7 @@ type GetBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -18945,7 +19121,7 @@ type GetBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. 
If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19056,7 +19232,7 @@ type GetBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19176,7 +19352,7 @@ type GetBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19310,17 +19486,17 @@ type GetObjectAclInput struct { // The bucket name that contains the object for which to get the ACL information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19484,25 +19660,25 @@ type GetObjectInput struct { // The bucket name containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information
- // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
//
- // When using this API with Amazon S3 on Outposts, you must direct requests
+ // When using this action with Amazon S3 on Outposts, you must direct requests
// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // using this operation using S3 on Outposts through the AWS SDKs, you provide
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
// the Outposts bucket ARN in place of the bucket name. For more information
- // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account id of the expected bucket owner. If the bucket is owned by a
+ // The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied)
// error.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -19565,14 +19741,14 @@ type GetObjectInput struct {
// Sets the Expires header of the response.
ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"`
- // Specifies the algorithm to use to when encrypting the object (for example,
+ // Specifies the algorithm to use when decrypting the object (for example,
// AES256).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
- // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
- // data. This value is used to store the object and then it is discarded; Amazon
- // S3 does not store the encryption key. The key must be appropriate for use
- // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // Specifies the customer-provided encryption key for Amazon S3 used to encrypt
+ // the data. This value is used to decrypt the object when recovering it and
+ // must match the one used when storing the data. The key must be appropriate
+ // for use with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -19784,17 +19960,17 @@ type GetObjectLegalHoldInput struct { // The bucket name containing the object whose Legal Hold status you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -19939,17 +20115,17 @@ type GetObjectLockConfigurationInput struct { // The bucket whose Object Lock configuration you want to retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -20103,7 +20279,7 @@ type GetObjectOutput struct { // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Last modified date of the object + // Creation date of the object. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -20140,7 +20316,7 @@ type GetObjectOutput struct { // request. RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Provides information about object restoration operation and expiration time + // Provides information about object restoration action and expiration time // of the restored object copy. Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` @@ -20387,17 +20563,17 @@ type GetObjectRetentionInput struct { // The bucket name containing the object whose retention settings you want to // retrieve. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -20542,25 +20718,25 @@ type GetObjectTaggingInput struct { // The bucket name containing the object for which to get the tagging information. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
//
- // When using this API with Amazon S3 on Outposts, you must direct requests
+ // When using this action with Amazon S3 on Outposts, you must direct requests
// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // using this operation using S3 on Outposts through the AWS SDKs, you provide
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
// the Outposts bucket ARN in place of the bucket name. For more information
- // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account id of the expected bucket owner. If the bucket is owned by a
+ // The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied)
// error.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
@@ -20570,6 +20746,13 @@ type GetObjectTaggingInput struct {
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. For information
+ // about downloading objects from requester pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 Developer Guide.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
// The versionId of the object for which to get the tagging information.
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -20631,6 +20814,12 @@ func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
return s
}
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput {
+ s.RequestPayer = &v
+ return s
+}
+
// SetVersionId sets the VersionId field's value.
func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
s.VersionId = &v
@@ -20707,7 +20896,7 @@ type GetObjectTorrentInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- // The account id of the expected bucket owner. If the bucket is owned by a
+ // The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied)
// error.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -20857,7 +21046,7 @@ type GetPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -21149,25 +21338,25 @@ type HeadBucketInput struct { // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -21264,25 +21453,25 @@ type HeadObjectInput struct { // The name of the bucket containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -21555,7 +21744,7 @@ type HeadObjectOutput struct { // The date and time at which the object is no longer cacheable. Expires *string `location:"header" locationName:"Expires" type:"string"` - // Last modified date of the object + // Creation date of the object. LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. @@ -21636,7 +21825,7 @@ type HeadObjectOutput struct { // If an archive copy is already restored, the header value indicates when Amazon // S3 is scheduled to delete the object copy. For example: // - // x-amz-restore: ongoing-request="false", expiry-date="Fri, 23 Dec 2012 00:00:00 + // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 // GMT" // // If the object restoration is in progress, the header returns the value ongoing-request="true". @@ -21881,6 +22070,10 @@ type IndexDocument struct { // with the key name images/index.html) The suffix must not be empty and must // not include a slash character. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. 
For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
// Suffix is a required field
Suffix *string `type:"string" required:"true"`
}
@@ -22164,6 +22357,10 @@ type IntelligentTieringFilter struct {
// An object key name prefix that identifies the subset of objects to which
// the rule applies.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
Prefix *string `type:"string"`
// A container of a key value name pair.
@@ -22705,14 +22902,14 @@ type LambdaFunctionConfiguration struct {
// The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
// more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // in the Amazon S3 User Guide.
//
// Events is a required field
Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
// Specifies object key name filtering rules. For information about key name
// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // in the Amazon S3 User Guide.
Filter *NotificationConfigurationFilter `type:"structure"`
// An optional unique identifier for configurations in a notification configuration.
@@ -22880,7 +23077,7 @@ type LifecycleRule struct {
// that Amazon S3 will wait before permanently removing all parts of the upload.
// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
- // in the Amazon Simple Storage Service Developer Guide.
+ // in the Amazon S3 User Guide.
AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
// Specifies the expiration for the lifecycle of the object in the form of date,
@@ -22888,7 +23085,8 @@ type LifecycleRule struct {
Expiration *LifecycleExpiration `type:"structure"`
// The Filter is used to identify objects that a Lifecycle Rule applies to.
- // A Filter must have exactly one of Prefix, Tag, or And specified.
+ // A Filter must have exactly one of Prefix, Tag, or And specified. Filter is
+ // required if the LifecycleRule does not contain a Prefix element.
Filter *LifecycleRuleFilter `type:"structure"`
// Unique identifier for the rule. The value cannot be longer than 255 characters.
@@ -22909,7 +23107,11 @@ type LifecycleRule struct {
NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
// Prefix identifying one or more objects to which the rule applies. This is
- // No longer used; use Filter instead.
+ // no longer used; use Filter instead.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
// // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -23073,6 +23275,10 @@ type LifecycleRuleFilter struct { And *LifecycleRuleAndOperator `type:"structure"` // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // This tag must exist in the object's tag set in order for the rule to apply. @@ -23139,7 +23345,7 @@ type ListBucketAnalyticsConfigurationsInput struct { // should begin. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -23433,7 +23639,7 @@ type ListBucketInventoryConfigurationsInput struct { // that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -23586,7 +23792,7 @@ type ListBucketMetricsConfigurationsInput struct { // value that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -23777,20 +23983,20 @@ type ListMultipartUploadsInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. 
For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -23813,7 +24019,7 @@ type ListMultipartUploadsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24125,7 +24331,7 @@ type ListObjectVersionsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24134,7 +24340,7 @@ type ListObjectVersionsInput struct { KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. If additional keys satisfy the search criteria, // but were not returned because max-keys was exceeded, the response contains // true. To return the additional keys, see key-marker @@ -24417,20 +24623,20 @@ type ListObjectsInput struct { // The name of the bucket containing the objects. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -24446,7 +24652,7 @@ type ListObjectsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24455,7 +24661,7 @@ type ListObjectsInput struct { Marker *string `location:"querystring" locationName:"marker" type:"string"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` @@ -24579,8 +24785,8 @@ func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { type ListObjectsOutput struct { _ struct{} `type:"structure"` - // All of the keys rolled up in a common prefix count as a single return when - // calculating the number of returns. + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. // // A response can contain CommonPrefixes only if you specify a delimiter. // @@ -24711,20 +24917,20 @@ type ListObjectsV2Input struct { // Bucket name to list. // - // When using this API with an access point, you must direct requests to the - // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -24740,7 +24946,7 @@ type ListObjectsV2Input struct { // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -24751,7 +24957,7 @@ type ListObjectsV2Input struct { FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` // Sets the maximum number of keys returned in the response. By default the - // API returns up to 1,000 key names. The response might contain fewer keys + // action returns up to 1,000 key names. The response might contain fewer keys // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` @@ -24891,8 +25097,8 @@ func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { type ListObjectsV2Output struct { _ struct{} `type:"structure"` - // All of the keys rolled up into a common prefix count as a single return when - // calculating the number of returns. + // All of the keys (up to 1,000) rolled up into a common prefix count as a single + // return when calculating the number of returns. 
//
// A response can contain CommonPrefixes only if you specify a delimiter.
//
@@ -24936,31 +25142,31 @@ type ListObjectsV2Output struct {
IsTruncated *bool `type:"boolean"`
// KeyCount is the number of keys returned with this request. KeyCount will
- // always be less than equals to MaxKeys field. Say you ask for 50 keys, your
- // result will include less than equals 50 keys
+ // always be less than or equal to the MaxKeys field. Say you ask for 50 keys,
+ // your result will include at most 50 keys.
KeyCount *int64 `type:"integer"`
// Sets the maximum number of keys returned in the response. By default the
- // API returns up to 1,000 key names. The response might contain fewer keys
+ // action returns up to 1,000 key names. The response might contain fewer keys
// but will never contain more.
MaxKeys *int64 `type:"integer"`
// The bucket name.
//
- // When using this API with an access point, you must direct requests to the
- // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this operation with an access point through the AWS SDKs, you
- // provide the access point ARN in place of the bucket name. For more information
- // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // When using this action with an access point, you must direct requests to
+ // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the AWS SDKs, you provide
+ // the access point ARN in place of the bucket name. For more information about
+ // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
//
- // When using this API with Amazon S3 on Outposts, you must direct requests
+ // When using this action with Amazon S3 on Outposts, you must direct requests
// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
- // using this operation using S3 on Outposts through the AWS SDKs, you provide
+ // using this action using S3 on Outposts through the AWS SDKs, you provide
// the Outposts bucket ARN in place of the bucket name. For more information
- // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
- // in the Amazon Simple Storage Service Developer Guide.
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
Name *string `type:"string"`
// NextContinuationToken is sent when isTruncated is true, which means there
@@ -25063,25 +25269,25 @@ type ListPartsInput struct {
// The name of the bucket to which the parts are being uploaded.
//
- // When using this API with an access point, you must direct requests to the
- // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
- // When using this operation with an access point through the AWS SDKs, you
- // provide the access point ARN in place of the bucket name.
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -25983,7 +26189,7 @@ type NoncurrentVersionTransition struct { // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -26145,7 +26351,7 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` @@ -26195,7 +26401,7 @@ type Object struct { // the object. Key *string `min:"1" type:"string"` - // The date the Object was Last Modified + // Creation date of the object. LastModified *time.Time `type:"timestamp"` // The owner of the object @@ -26258,7 +26464,11 @@ func (s *Object) SetStorageClass(v string) *Object { type ObjectIdentifier struct { _ struct{} `type:"structure"` - // Key name of the object to delete. 
+ // Key name of the object. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -26309,10 +26519,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - // Indicates whether this bucket has an Object Lock configuration enabled. + // Indicates whether this bucket has an Object Lock configuration enabled. Enable + // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - // The Object Lock rule in place for the specified object. + // Specifies the Object Lock rule for the specified object. Enable this + // rule when you apply ObjectLockConfiguration to a bucket. Bucket settings + // require both a mode and a period. The period can be either Days or Years + // but you must select one. You cannot specify Days and Years at the same time. Rule *ObjectLockRule `type:"structure"` } @@ -26399,8 +26613,10 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti type ObjectLockRule struct { _ struct{} `type:"structure"` - // The default retention period that you want to apply to new objects placed - // in the specified bucket. + // The default Object Lock retention mode and period that you want to apply + // to new objects placed in the specified bucket. Bucket settings require both + // a mode and a period. The period can be either Days or Years but you must + // select one. You cannot specify Days and Years at the same time. DefaultRetention *DefaultRetention `type:"structure"` } @@ -26899,7 +27115,7 @@ func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstr // S3 bucket. You can enable the configuration options in any combination. For // more information about when Amazon S3 considers a bucket or object public, // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` @@ -26990,7 +27206,7 @@ type PutBucketAccelerateConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27105,7 +27321,7 @@ type PutBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error.
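// Illustrative sketch, not part of the vendored SDK: the ObjectLockConfiguration
// shape described above, with ObjectLockEnabled set and a DefaultRetention that
// carries a mode plus exactly one of Days or Years. Bucket name is a placeholder.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func EnableDefaultRetention(svc *s3.S3) error {
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // either Days or Years, never both
				},
			},
		},
	})
	return err
}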
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27273,7 +27489,7 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27404,12 +27620,12 @@ type PutBucketCorsInput struct { // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon - // Simple Storage Service Developer Guide. + // S3 User Guide. // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27522,12 +27738,12 @@ type PutBucketEncryptionInput struct { // Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS // (SSE-KMS). For information about the Amazon S3 default encryption feature, // see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27769,7 +27985,7 @@ type PutBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -27902,7 +28118,7 @@ type PutBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
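// Illustrative sketch, not part of the vendored SDK: how the ExpectedBucketOwner
// header that recurs on these inputs surfaces the documented HTTP 403 through
// awserr. Bucket name and account ID are placeholders.

package s3sketches

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

func GetAclFromExpectedOwner(svc *s3.S3) error {
	_, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
		Bucket:              aws.String("example-bucket"), // placeholder
		ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
	})
	if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 {
		return fmt.Errorf("bucket is not owned by the expected account: %v", err)
	}
	return err
}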
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28014,7 +28230,7 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28133,7 +28349,7 @@ type PutBucketLoggingInput struct { // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28247,7 +28463,7 @@ type PutBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28380,7 +28596,7 @@ type PutBucketNotificationConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28500,7 +28716,7 @@ type PutBucketNotificationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28614,7 +28830,7 @@ type PutBucketOwnershipControlsInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28738,7 +28954,7 @@ type PutBucketPolicyInput struct { // to change this bucket policy in the future. 
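// Illustrative sketch, not part of the vendored SDK: the
// ConfirmRemoveSelfBucketAccess guard documented above. S3 can reject a policy
// that would remove the caller's permission to change the policy in the future
// unless this header confirms the intent. Bucket name is a placeholder; the
// policy JSON is supplied by the caller.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func PutRiskyBucketPolicy(svc *s3.S3, policyJSON string) error {
	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket:                        aws.String("example-bucket"), // placeholder
		Policy:                        aws.String(policyJSON),
		ConfirmRemoveSelfBucketAccess: aws.Bool(true), // acknowledge possible self lock-out
	})
	return err
}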
ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28858,7 +29074,7 @@ type PutBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -28987,7 +29203,7 @@ type PutBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29106,7 +29322,7 @@ type PutBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29225,7 +29441,7 @@ type PutBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29349,7 +29565,7 @@ type PutBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29473,17 +29689,17 @@ type PutObjectAclInput struct { // The bucket name that contains the object to which you want to attach the // ACL. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
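// Illustrative sketch, not part of the vendored SDK: the "access point ARN in
// place of the bucket name" convention these doc comments keep repeating. The
// ARN, key, and canned ACL are placeholders.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func PutAclViaAccessPoint(svc *s3.S3) error {
	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		// An access point ARN goes where a bucket name would normally go (placeholder).
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap"),
		Key:    aws.String("example/key.txt"),
		ACL:    aws.String(s3.ObjectCannedACLPrivate),
	})
	return err
}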
- // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29512,22 +29728,22 @@ type PutObjectAclInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Key for which the PUT operation was initiated. + // Key for which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. 
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -29722,22 +29938,22 @@ type PutObjectInput struct { // Object data. Body io.ReadSeeker `type:"blob"` - // The bucket name to which the PUT operation was initiated. + // The bucket name to which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -29747,8 +29963,8 @@ type PutObjectInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a PUT operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -29786,7 +30002,7 @@ type PutObjectInput struct { // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account id of the expected bucket owner. 
If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -29815,7 +30031,7 @@ type PutObjectInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Object key for which the PUT operation was initiated. + // Object key for which the PUT action was initiated. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -30181,17 +30397,17 @@ type PutObjectLegalHoldInput struct { // The bucket name containing the object that you want to place a Legal Hold // on. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30350,7 +30566,7 @@ type PutObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30611,20 +30827,20 @@ type PutObjectRetentionInput struct { // The bucket name that contains the object you want to apply this Object Retention // configuration to. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. 
For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Indicates whether this operation should bypass Governance-mode restrictions. + // Indicates whether this action should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30787,25 +31003,25 @@ type PutObjectTaggingInput struct { // The bucket name containing the object. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. 
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30815,6 +31031,13 @@ type PutObjectTaggingInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. For information + // about downloading objects from requester pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 Developer Guide. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Container for the TagSet and Tag elements // // Tagging is a required field @@ -30889,6 +31112,12 @@ func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { return s } +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + // SetTagging sets the Tagging field's value. func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { s.Tagging = v @@ -30960,7 +31189,7 @@ type PutPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -30969,7 +31198,7 @@ type PutPublicAccessBlockInput struct { // S3 bucket. You can enable the configuration options in any combination. For // more information about when Amazon S3 considers a bucket or object public, // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. // // PublicAccessBlockConfiguration is a required field PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -31082,7 +31311,7 @@ type QueueConfiguration struct { // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide.
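// Illustrative sketch, not part of the vendored SDK: the key-name filtering
// that NotificationConfigurationFilter (referenced above) expresses, here as a
// prefix rule on a queue configuration. Bucket name and queue ARN are placeholders.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func NotifyOnImageUploads(svc *s3.S3) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"), // placeholder
				Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
				Filter: &s3.NotificationConfigurationFilter{
					Key: &s3.KeyFilter{
						FilterRules: []*s3.FilterRule{{
							Name:  aws.String(s3.FilterRuleNamePrefix),
							Value: aws.String("images/"),
						}},
					},
				},
			}},
		},
	})
	return err
}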
Filter *NotificationConfigurationFilter `type:"structure"` // An optional unique identifier for configurations in a notification configuration. @@ -31158,7 +31387,7 @@ type QueueConfigurationDeprecated struct { // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` - // A collection of bucket events for which to send notifications + // A collection of bucket events for which to send notifications. Events []*string `locationName:"Event" type:"list" flattened:"true"` // An optional unique identifier for configurations in a notification configuration. @@ -31275,11 +31504,19 @@ type Redirect struct { // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required // if one of the siblings is present. Can be present only if ReplaceKeyWith // is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). ReplaceKeyPrefixWith *string `type:"string"` // The specific object key to use in the redirect request. For example, redirect // request to error.html. Not required if one of the siblings is present. Can // be present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). ReplaceKeyWith *string `type:"string"` } @@ -31428,7 +31665,7 @@ type ReplicationConfiguration struct { // The Amazon Resource Name (ARN) of the AWS Identity and Access Management // (IAM) role that Amazon S3 assumes when replicating objects. For more information, // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. // // Role is a required field Role *string `type:"string" required:"true"` @@ -31529,6 +31766,10 @@ type ReplicationRule struct { // the rule applies. The maximum prefix length is 1,024 characters. To include // all objects in a bucket, specify an empty string. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` @@ -31539,7 +31780,7 @@ type ReplicationRule struct { // with the highest priority. The higher the number, the higher the priority. // // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Priority *int64 `type:"integer"` // A container that describes additional filters for identifying the source @@ -31665,7 +31906,7 @@ func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { // an And tag. // // * If you specify a filter based on multiple tags, wrap the Tag elements -// in an And tag +// in an And tag. 
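// Illustrative sketch, not part of the vendored SDK: the And wrapper described
// above. A replication rule filter that combines a prefix with multiple tags
// must nest them inside And. Prefix and tag values are placeholders.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func prefixAndTagsFilter() *s3.ReplicationRuleFilter {
	return &s3.ReplicationRuleFilter{
		And: &s3.ReplicationRuleAndOperator{
			Prefix: aws.String("logs/"), // placeholder
			Tags: []*s3.Tag{
				{Key: aws.String("team"), Value: aws.String("storage")},
				{Key: aws.String("retain"), Value: aws.String("true")},
			},
		},
	}
}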
type ReplicationRuleAndOperator struct { _ struct{} `type:"structure"` @@ -31737,6 +31978,10 @@ type ReplicationRuleFilter struct { // An object key name prefix that identifies the subset of objects to which // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). Prefix *string `type:"string"` // A container for specifying a tag key and value. @@ -31946,30 +32191,30 @@ type RestoreObjectInput struct { // The bucket name containing the object to restore. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // Object key for which the operation was initiated. + // Object key for which the action was initiated. 
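// Illustrative sketch, not part of the vendored SDK: a RestoreObjectInput for
// the fields documented above. Bucket, key, retention days, and tier are
// placeholders.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func RestoreArchivedObject(svc *s3.S3) error {
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("archive/report.csv"),
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2), // keep the temporary copy for two days
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	return err
}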
// // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -32240,7 +32485,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { // Specifies the redirect behavior and when a redirect is applied. For more // information about routing rules, see Configuring advanced conditional redirects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type RoutingRule struct { _ struct{} `type:"structure"` @@ -32296,7 +32541,7 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { // Specifies lifecycle rules for an Amazon S3 bucket. For more information, // see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) // in the Amazon Simple Storage Service API Reference. For examples, see Put -// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). type Rule struct { _ struct{} `type:"structure"` @@ -32304,7 +32549,7 @@ type Rule struct { // that Amazon S3 will wait before permanently removing all parts of the upload. // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` // Specifies the expiration for the lifecycle of the object. @@ -32332,6 +32577,10 @@ type Rule struct { // Object key prefix that identifies one or more objects to which this rule // applies. // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // // Prefix is a required field Prefix *string `type:"string" required:"true"` @@ -32344,7 +32593,7 @@ type Rule struct { // Specifies when an object transitions to a specified storage class. For more // information about Amazon S3 lifecycle configuration rules, see Transitioning // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Transition *Transition `type:"structure"` } @@ -32703,7 +32952,7 @@ type SelectObjectContentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. 
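// Illustrative sketch, not part of the vendored SDK: the legacy lifecycle Rule
// discussed above, with its required Prefix, an abort window for incomplete
// multipart uploads, and a storage-class transition. All values are placeholders.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func exampleLifecycleRule() *s3.Rule {
	return &s3.Rule{
		ID:     aws.String("archive-logs"), // placeholder
		Prefix: aws.String("logs/"),        // required on this legacy type
		Status: aws.String(s3.ExpirationStatusEnabled),
		AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
			DaysAfterInitiation: aws.Int64(7),
		},
		Transition: &s3.Transition{
			Days:         aws.Int64(30),
			StorageClass: aws.String(s3.TransitionStorageClassGlacier),
		},
	}
}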
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -33168,7 +33417,7 @@ type ServerSideEncryptionRule struct { // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. // // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. BucketKeyEnabled *bool `type:"boolean"` } @@ -33735,14 +33984,14 @@ type TopicConfiguration struct { // The Amazon S3 bucket event about which to send notifications. For more information, // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` // Specifies object key name filtering rules. For information about key name // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - // in the Amazon Simple Storage Service Developer Guide. + // in the Amazon S3 User Guide. Filter *NotificationConfigurationFilter `type:"structure"` // An optional unique identifier for configurations in a notification configuration. @@ -33868,7 +34117,7 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep // Specifies when an object transitions to a specified storage class. For more // information about Amazon S3 lifecycle configuration rules, see Transitioning // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) -// in the Amazon Simple Storage Service Developer Guide. +// in the Amazon S3 User Guide. type Transition struct { _ struct{} `type:"structure"` @@ -33917,27 +34166,27 @@ type UploadPartCopyInput struct { // The bucket name. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Specifies the source object for the copy operation. You specify the value // in one of two formats, depending on whether you want to access the source - // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): // // * For objects not accessed through an access point, specify the name of // the source bucket and key of the source object, separated by a slash (/). @@ -34001,12 +34250,12 @@ type UploadPartCopyInput struct { // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - // The account id of the expected destination bucket owner. If the destination + // The account ID of the expected destination bucket owner. If the destination // bucket is owned by a different account, the request will fail with an HTTP // 403 (Access Denied) error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` - // The account id of the expected source bucket owner. If the source bucket + // The account ID of the expected source bucket owner. If the source bucket // is owned by a different account, the request will fail with an HTTP 403 (Access // Denied) error. ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` @@ -34359,20 +34608,20 @@ type UploadPartInput struct { // The name of the bucket to which the multipart upload was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
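// Illustrative sketch, not part of the vendored SDK: an UploadPartCopyInput
// using the classic bucket/key CopySource form described above (an access point
// ARN form is also accepted). Names, the upload ID, and the byte range are
// placeholders.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func CopyFirstPart(svc *s3.S3, uploadID string) error {
	_, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:          aws.String("destination-bucket"), // placeholder
		Key:             aws.String("large-object"),
		UploadId:        aws.String(uploadID),
		PartNumber:      aws.Int64(1),
		CopySource:      aws.String("source-bucket/source-object"), // placeholder
		CopySourceRange: aws.String("bytes=0-5242879"),             // first 5 MiB
	})
	return err
}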
// - // When using this action with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -34386,7 +34635,7 @@ type UploadPartInput struct { // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -34793,6 +35042,452 @@ func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfigu return s } +type WriteGetObjectResponseInput struct { + _ struct{} `locationName:"WriteGetObjectResponseRequest" type:"structure" payload:"Body"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"x-amz-fwd-header-accept-ranges" type:"string"` + + // The object data. + // + // To use a non-seekable io.Reader for this request, wrap the io.Reader with + // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable + // readers. This will allow the SDK to send the reader's payload as chunked + // transfer encoding. + Body io.ReadSeeker `type:"blob"` + + // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for + // server-side encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"x-amz-fwd-header-Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"x-amz-fwd-header-Content-Language" type:"string"` + + // The size of the content body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response.
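// Illustrative sketch, not part of the vendored SDK: the Body and
// aws.ReadSeekCloser note above, as an S3 Object Lambda handler would use it to
// return a transformed object. Route, token, and payload are placeholders.

package s3sketches

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func ForwardTransformedObject(svc *s3.S3, route, token string) error {
	body := strings.NewReader("transformed payload") // placeholder content
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(200),
		Body:         aws.ReadSeekCloser(body),
	})
	return err
}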
+ ContentRange *string `location:"header" locationName:"x-amz-fwd-header-Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"x-amz-fwd-header-Content-Type" type:"string"` + + // Specifies whether an object stored in Amazon S3 is (true) or is not (false) + // a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-delete-marker" type:"boolean"` + + // An opaque identifier assigned by a web server to a specific version of a + // resource found at a URL. + ETag *string `location:"header" locationName:"x-amz-fwd-header-ETag" type:"string"` + + // A string that uniquely identifies an error condition. Returned in the <Code> + // tag of the error XML response for a corresponding GetObject call. Cannot + // be used with a successful StatusCode header or when the transformed object + // is provided in the body. All error codes from S3 are sentence-cased. Regex + // value is "^[A-Z][a-zA-Z]+$". + ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"` + + // Contains a generic description of the error condition. Returned in the <Message> + // tag of the error XML response for a corresponding GetObject call. Cannot + // be used with a successful StatusCode header or when the transformed object + // is provided in body. + ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"` + + // If the expiration of the object stored in Amazon S3 is configured (see PUT Bucket lifecycle), + // it includes expiry-date and rule-id key-value pairs providing object expiration + // information. The value of the rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"x-amz-fwd-header-Expires" type:"timestamp"` + + // The date and time that the object was last modified. + LastModified *time.Time `location:"header" locationName:"x-amz-fwd-header-Last-Modified" type:"timestamp"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Set to the number of metadata entries not returned in x-amz-meta headers. + // This can happen if you create metadata using an API like SOAP that supports + // more flexible metadata than the REST API. For example, using SOAP, you can + // create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-missing-meta" type:"integer"` + + // Indicates whether an object stored in Amazon S3 has an active legal hold. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // Indicates whether an object stored in Amazon S3 has Object Lock enabled. + // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html). + ObjectLockMode *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when Object Lock is configured to expire.
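// Illustrative sketch, not part of the vendored SDK: the ErrorCode/ErrorMessage
// path documented above, forwarding an error instead of a transformed body.
// These fields cannot be combined with a successful StatusCode or a body.
// Route and token are placeholders.

package s3sketches

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func ForwardNotFound(svc *s3.S3, route, token string) error {
	_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
		RequestRoute: aws.String(route),
		RequestToken: aws.String(token),
		StatusCode:   aws.Int64(404),
		ErrorCode:    aws.String("NoSuchKey"), // sentence-cased, matches ^[A-Z][a-zA-Z]+$
		ErrorMessage: aws.String("The specified key does not exist."),
	})
	return err
}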
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-mp-parts-count" type:"integer"` + + // Indicates if the request involves a bucket that is either a source or a destination + // in a Replication rule. For more information about S3 Replication, see Replication + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html). + ReplicationStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Route prefix to the HTTP URL generated. + // + // RequestRoute is a required field + RequestRoute *string `location:"header" locationName:"x-amz-request-route" type:"string" required:"true"` + + // A single-use encrypted token that maps WriteGetObjectResponse to the end + // user GetObject request. + // + // RequestToken is a required field + RequestToken *string `location:"header" locationName:"x-amz-request-token" type:"string" required:"true"` + + // Provides information about the object restoration operation and the expiration + // time of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-fwd-header-x-amz-restore" type:"string"` + + // Encryption algorithm used if server-side encryption with a customer-provided + // encryption key was specified for the object stored in Amazon S3. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // 128-bit MD5 digest of the customer-provided encryption key used in Amazon S3 + // to encrypt data stored in S3. For more information, see Protecting data using + // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html). + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (AWS KMS) + // symmetric customer managed customer master key (CMK) that was used for the + // object stored in Amazon S3. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing the requested object + // in Amazon S3 (for example, AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The integer status code for an HTTP response of a corresponding GetObject + // request.
+ // + // Status Codes + // + // * 200 - OK + // + // * 206 - Partial Content + // + // * 304 - Not Modified + // + // * 400 - Bad Request + // + // * 401 - Unauthorized + // + // * 403 - Forbidden + // + // * 404 - Not Found + // + // * 405 - Method Not Allowed + // + // * 409 - Conflict + // + // * 411 - Length Required + // + // * 412 - Precondition Failed + // + // * 416 - Range Not Satisfiable + // + // * 500 - Internal Server Error + // + // * 503 - Service Unavailable + StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"` + + // The class of storage used to store the object in Amazon S3. + StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-tagging-count" type:"integer"` + + // An ID used to reference a specific version of the object. + VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s WriteGetObjectResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteGetObjectResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WriteGetObjectResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteGetObjectResponseInput"} + if s.RequestRoute == nil { + invalidParams.Add(request.NewErrParamRequired("RequestRoute")) + } + if s.RequestRoute != nil && len(*s.RequestRoute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestRoute", 1)) + } + if s.RequestToken == nil { + invalidParams.Add(request.NewErrParamRequired("RequestToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *WriteGetObjectResponseInput) SetAcceptRanges(v string) *WriteGetObjectResponseInput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *WriteGetObjectResponseInput) SetBody(v io.ReadSeeker) *WriteGetObjectResponseInput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *WriteGetObjectResponseInput) SetBucketKeyEnabled(v bool) *WriteGetObjectResponseInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectResponseInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *WriteGetObjectResponseInput) SetContentEncoding(v string) *WriteGetObjectResponseInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *WriteGetObjectResponseInput) SetContentLanguage(v string) *WriteGetObjectResponseInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value.
+func (s *WriteGetObjectResponseInput) SetContentLength(v int64) *WriteGetObjectResponseInput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *WriteGetObjectResponseInput) SetContentRange(v string) *WriteGetObjectResponseInput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *WriteGetObjectResponseInput) SetContentType(v string) *WriteGetObjectResponseInput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *WriteGetObjectResponseInput) SetDeleteMarker(v bool) *WriteGetObjectResponseInput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *WriteGetObjectResponseInput) SetETag(v string) *WriteGetObjectResponseInput { + s.ETag = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *WriteGetObjectResponseInput) SetErrorCode(v string) *WriteGetObjectResponseInput { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *WriteGetObjectResponseInput) SetErrorMessage(v string) *WriteGetObjectResponseInput { + s.ErrorMessage = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *WriteGetObjectResponseInput) SetExpiration(v string) *WriteGetObjectResponseInput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *WriteGetObjectResponseInput) SetExpires(v time.Time) *WriteGetObjectResponseInput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *WriteGetObjectResponseInput) SetLastModified(v time.Time) *WriteGetObjectResponseInput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *WriteGetObjectResponseInput) SetMetadata(v map[string]*string) *WriteGetObjectResponseInput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *WriteGetObjectResponseInput) SetMissingMeta(v int64) *WriteGetObjectResponseInput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockLegalHoldStatus(v string) *WriteGetObjectResponseInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockMode(v string) *WriteGetObjectResponseInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockRetainUntilDate(v time.Time) *WriteGetObjectResponseInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *WriteGetObjectResponseInput) SetPartsCount(v int64) *WriteGetObjectResponseInput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *WriteGetObjectResponseInput) SetReplicationStatus(v string) *WriteGetObjectResponseInput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *WriteGetObjectResponseInput) SetRequestCharged(v string) *WriteGetObjectResponseInput { + s.RequestCharged = &v + return s +} + +// SetRequestRoute sets the RequestRoute field's value. 
+func (s *WriteGetObjectResponseInput) SetRequestRoute(v string) *WriteGetObjectResponseInput { + s.RequestRoute = &v + return s +} + +// SetRequestToken sets the RequestToken field's value. +func (s *WriteGetObjectResponseInput) SetRequestToken(v string) *WriteGetObjectResponseInput { + s.RequestToken = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *WriteGetObjectResponseInput) SetRestore(v string) *WriteGetObjectResponseInput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerAlgorithm(v string) *WriteGetObjectResponseInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerKeyMD5(v string) *WriteGetObjectResponseInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *WriteGetObjectResponseInput) SetSSEKMSKeyId(v string) *WriteGetObjectResponseInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *WriteGetObjectResponseInput) SetServerSideEncryption(v string) *WriteGetObjectResponseInput { + s.ServerSideEncryption = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *WriteGetObjectResponseInput) SetStatusCode(v int64) *WriteGetObjectResponseInput { + s.StatusCode = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *WriteGetObjectResponseInput) SetStorageClass(v string) *WriteGetObjectResponseInput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *WriteGetObjectResponseInput) SetTagCount(v int64) *WriteGetObjectResponseInput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *WriteGetObjectResponseInput) SetVersionId(v string) *WriteGetObjectResponseInput { + s.VersionId = &v + return s +} + +func (s *WriteGetObjectResponseInput) hostLabels() map[string]string { + return map[string]string{ + "RequestRoute": aws.StringValue(s.RequestRoute), + } +} + +type WriteGetObjectResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s WriteGetObjectResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteGetObjectResponseOutput) GoString() string { + return s.String() +} + const ( // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value AnalyticsS3ExportFileFormatCsv = "CSV" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index f1959b0..ce87ab3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -48,6 +48,8 @@ func defaultInitRequestFn(r *request.Request) { // case opGetObject: // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) + case opWriteGetObjectResponse: + r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go index 6346b92..9fc2105 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -1,6 +1,8 @@ package s3 import ( + "fmt" + "github.com/aws/aws-sdk-go/aws/awserr" "net/url" "strings" @@ -11,6 +13,13 @@ import ( "github.com/aws/aws-sdk-go/internal/s3shared/arn" ) +const ( + s3Namespace = "s3" + s3AccessPointNamespace = "s3-accesspoint" + s3ObjectsLambdaNamespace = "s3-object-lambda" + s3OutpostsNamespace = "s3-outposts" +) + // Used by shapes with members decorated as endpoint ARN. 
func parseEndpointARN(v string) (arn.Resource, error) { return arn.ParseResource(v, accessPointResourceParser) @@ -20,10 +29,14 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { resParts := arn.SplitResource(a.Resource) switch resParts[0] { case "accesspoint": - if a.Service != "s3" { - return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"} + switch a.Service { + case s3Namespace: + return arn.ParseAccessPointResource(a, resParts[1:]) + case s3ObjectsLambdaNamespace: + return parseS3ObjectLambdaAccessPointResource(a, resParts) + default: + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)} } - return arn.ParseAccessPointResource(a, resParts[1:]) case "outpost": if a.Service != "s3-outposts" { return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} @@ -80,6 +93,25 @@ func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.Outpo return outpostAccessPointARN, nil } +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + if len(accessPointARN.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} + func endpointHandler(req *request.Request) { endpoint, ok := req.Params.(endpointARNGetter) if !ok || !endpoint.hasEndpointARN() { @@ -116,6 +148,11 @@ func endpointHandler(req *request.Request) { if err != nil { req.Error = err } + case arn.S3ObjectLambdaAccessPointARN: + err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } case arn.OutpostAccessPointARN: // outposts does not support FIPS regions if resReq.ResourceConfiguredForFIPS() { @@ -162,6 +199,31 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce return nil } +func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error { + // DualStack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { // Accelerate not supported if aws.BoolValue(req.Config.S3UseAccelerate) { @@ -192,3 +254,37 @@ func removeBucketFromPath(u *url.URL) { u.Path = "/" } } 
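The buildWriteGetObjectResponseEndpoint helper added just below resolves the regional "s3" endpoint and then swaps the leading service host label to "s3-object-lambda" (via the updateS3HostPrefixForS3ObjectLambda helper this patch also introduces). A minimal, self-contained sketch of that label swap; swapHostPrefix is a stand-in name and the hostname is illustrative, neither is part of the SDK:

package main

import (
	"fmt"
	"strings"
)

// swapHostPrefix mirrors the patch's updateHostPrefix helper: when the host
// begins with the old service label, it is replaced with the new one.
func swapHostPrefix(host, oldPrefix, newPrefix string) string {
	if strings.HasPrefix(host, oldPrefix) {
		return newPrefix + host[len(oldPrefix):]
	}
	return host
}

func main() {
	// "s3.us-west-2.amazonaws.com" becomes "s3-object-lambda.us-west-2.amazonaws.com"
	fmt.Println(swapHostPrefix("s3.us-west-2.amazonaws.com", "s3", "s3-object-lambda"))
}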
+ +func buildWriteGetObjectResponseEndpoint(req *request.Request) { + // DualStack not supported + if aws.BoolValue(req.Config.UseDualStack) { + req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil) + return + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + req.Error = awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil) + return + } + + signingName := s3ObjectsLambdaNamespace + signingRegion := req.ClientInfo.SigningRegion + + if !hasCustomEndpoint(req) { + endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), EndpointsID) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) + return + } + signingRegion = endpoint.SigningRegion + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + req.Error = err + return + } + updateS3HostPrefixForS3ObjectLambda(req) + } + + redirectSigner(req, signingName, signingRegion) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go index eb77d98..71e9c9e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -66,13 +66,9 @@ func (a accessPointEndpointBuilder) build(req *request.Request) error { if err = updateRequestEndpoint(req, endpoint.URL); err != nil { return err } - const serviceEndpointLabel = "s3-accesspoint" // dual stack provided by endpoint resolver - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, "s3") { - req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] - } + updateS3HostForS3AccessPoint(req) } protocol.HostPrefixBuilder{ @@ -98,6 +94,73 @@ func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { } } +// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an S3 Object Lambda access point ARN +type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN + +// build builds the endpoint for the corresponding access point ARN +// +// When building an endpoint from an access point ARN, the format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3-object-lambda" as the signing name. +// +func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if s3shared.IsFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported; the SDK must fail + // because there is no well-defined method for the SDK to construct a + // correct FIPS endpoint.
+ return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, EndpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + endpoint.SigningName = s3ObjectsLambdaNamespace + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + updateS3HostPrefixForS3ObjectLambda(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err) + } + + return nil +} + +func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID, + } +} + // outpostAccessPointEndpointBuilder represents the endpoint builder for an outpost access point ARN. type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN @@ -114,7 +177,7 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { resolveService := o.Service endpointsID := resolveService - if resolveService == "s3-outposts" { + if resolveService == s3OutpostsNamespace { endpointsID = "s3" } @@ -130,11 +193,7 @@ func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { if err = updateRequestEndpoint(req, endpoint.URL); err != nil { return err } - // add url host as s3-outposts - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, endpointsID) { - req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] - } + updateHostPrefix(req, endpointsID, resolveService) } protocol.HostPrefixBuilder{ @@ -170,7 +229,6 @@ func resolveRegionalEndpoint(r *request.Request, region string, endpointsID stri } func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { - r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) if err != nil { return awserr.New(request.ErrCodeSerialization, @@ -185,3 +243,19 @@ func redirectSigner(req *request.Request, signingName string, signingRegion stri req.ClientInfo.SigningName = signingName req.ClientInfo.SigningRegion = signingRegion } + +func updateS3HostForS3AccessPoint(req *request.Request) { + updateHostPrefix(req, "s3", s3AccessPointNamespace) +} + +func updateS3HostPrefixForS3ObjectLambda(req *request.Request) { + updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace) +} + +func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) { + host := req.HTTPRequest.URL.Host + if strings.HasPrefix(host, oldEndpointPrefix) { + // replace the service host label oldEndpointPrefix with newEndpointPrefix + req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):] + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index f64b551..6d3e726 100644 ---
a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -48,13 +48,13 @@ const ( // ErrCodeObjectAlreadyInActiveTierError for service response error code // "ObjectAlreadyInActiveTierError". // - // This operation is not allowed against this storage tier. + // This action is not allowed against this storage tier. ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" // ErrCodeObjectNotInActiveTierError for service response error code // "ObjectNotInActiveTierError". // - // The source object of the COPY operation is not in the active tier and is - // only stored in Amazon S3 Glacier. + // The source object of the COPY action is not in the active tier and is only + // stored in Amazon S3 Glacier. ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go index 7c62218..1e32fb9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go @@ -455,6 +455,10 @@ type S3API interface { UploadPartCopyWithContext(aws.Context, *s3.UploadPartCopyInput, ...request.Option) (*s3.UploadPartCopyOutput, error) UploadPartCopyRequest(*s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) + WriteGetObjectResponse(*s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error) + WriteGetObjectResponseWithContext(aws.Context, *s3.WriteGetObjectResponseInput, ...request.Option) (*s3.WriteGetObjectResponseOutput, error) + WriteGetObjectResponseRequest(*s3.WriteGetObjectResponseInput) (*request.Request, *s3.WriteGetObjectResponseOutput) + WaitUntilBucketExists(*s3.HeadBucketInput) error WaitUntilBucketExistsWithContext(aws.Context, *s3.HeadBucketInput, ...request.WaiterOption) error diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go new file mode 100644 index 0000000..f0a7f9b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/arn.go @@ -0,0 +1,23 @@ +package s3manager + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws/arn" +) + +func validateSupportedARNType(bucket string) error { + if !arn.IsARN(bucket) { + return nil + } + + parsedARN, err := arn.Parse(bucket) + if err != nil { + return err + } + + if parsedARN.Service == "s3-object-lambda" { + return fmt.Errorf("manager does not support s3-object-lambda service ARNs") + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go index 4b54b7c..bbf3595 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go @@ -192,6 +192,10 @@ func (d Downloader) Download(w io.WriterAt, input *s3.GetObjectInput, options .. // to perform a single GetObjectInput request for that object's range. This will // cause the part size and concurrency configurations to be ignored.
func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, options ...func(*Downloader)) (n int64, err error) { + if err := validateSupportedARNType(aws.StringValue(input.Bucket)); err != nil { + return 0, err + } + impl := downloader{w: w, in: input, cfg: d, ctx: ctx} for _, option := range options { diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go index 7dba834..9fa98fa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go @@ -391,6 +391,10 @@ func (u *uploader) upload() (*UploadOutput, error) { // init will initialize all default options. func (u *uploader) init() error { + if err := validateSupportedARNType(aws.StringValue(u.in.Bucket)); err != nil { + return err + } + if u.cfg.Concurrency == 0 { u.cfg.Concurrency = DefaultUploadConcurrency } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go index 6cac26f..fb88537 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_input.go @@ -23,22 +23,22 @@ type UploadInput struct { // The readable body payload to send to S3. Body io.Reader - // The bucket name to which the PUT operation was initiated. + // The bucket name to which the PUT action was initiated. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation with an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // When using this action with an access point, you must direct requests to + // the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the AWS SDKs, you provide + // the access point ARN in place of the bucket name. For more information about + // access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. // - // When using this API with Amazon S3 on Outposts, you must direct requests + // When using this action with Amazon S3 on Outposts, you must direct requests // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When - // using this operation using S3 on Outposts through the AWS SDKs, you provide + // using this action using S3 on Outposts through the AWS SDKs, you provide // the Outposts bucket ARN in place of the bucket name. For more information - // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) - // in the Amazon Simple Storage Service Developer Guide. 
+ // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -48,8 +48,8 @@ type UploadInput struct { // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with // SSE-KMS. // - // Specifying this header with a PUT operation doesn’t affect bucket-level - // settings for S3 Bucket Key. + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` // Can be used to specify caching behavior along the request/reply chain. For @@ -82,7 +82,7 @@ type UploadInput struct { // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The account id of the expected bucket owner. If the bucket is owned by a + // The account ID of the expected bucket owner. If the bucket is owned by a // different account, the request will fail with an HTTP 403 (Access Denied) // error. ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` @@ -111,7 +111,7 @@ type UploadInput struct { // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - // Object key for which the PUT operation was initiated. + // Object key for which the PUT action was initiated. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index bfc4372..17c4637 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -65,34 +65,6 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// You cannot use AWS account root user credentials to call AssumeRole. You -// must use credentials for an IAM user or an IAM role to call AssumeRole. -// -// For cross-account access, imagine that you own multiple accounts and need -// to access resources in each account. You could create long-term credentials -// in each account to access those resources. However, managing all those credentials -// and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account. -// Then use temporary security credentials to access all the other accounts -// by assuming roles in those accounts. For more information about roles, see -// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) -// in the IAM User Guide. -// -// Session Duration -// -// By default, the temporary security credentials created by AssumeRole last -// for one hour. However, you can use the optional DurationSeconds parameter -// to specify the duration of your session. 
You can provide a value from 900 -// seconds (15 minutes) up to the maximum session duration setting for the role. -// This setting can have a value from 1 hour to 12 hours. To learn how to view -// the maximum value for your role, see View the Maximum Session Duration Setting -// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) -// in the IAM User Guide. The maximum session duration limit applies when you -// use the AssumeRole* API operations or the assume-role* CLI commands. However -// the limit does not apply when you use those operations to create a console -// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) -// in the IAM User Guide. -// // Permissions // // The temporary security credentials created by AssumeRole can be used to make @@ -102,7 +74,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline +// use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and @@ -308,6 +280,15 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // +// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your AWS CLI or AWS API role session to a maximum of one hour. When +// you use the AssumeRole API operation to assume a role, you can specify the +// duration of your role session with the DurationSeconds parameter. You can +// specify a parameter value of up to 43200 seconds (12 hours), depending on +// the maximum session duration setting for your role. However, if you assume +// a role using role chaining and provide a DurationSeconds parameter value +// greater than one hour, the operation fails. +// // Permissions // // The temporary security credentials created by AssumeRoleWithSAML can be used @@ -317,7 +298,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline +// use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. 
The resulting session's // permissions are the intersection of the role's identity-based policy and @@ -346,16 +327,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // -// You can pass up to 50 session tags. The plain text session tag keys can’t +// You can pass up to 50 session tags. The plaintext session tag keys can’t // exceed 128 characters and the values can’t exceed 256 characters. For these // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. +// for this limit even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags +// for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, session tags override the role's tags with the same @@ -564,7 +545,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to -// use as managed session policies. The plain text that you use for both inline +// use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. Passing policies // to this operation returns new temporary credentials. The resulting session's // permissions are the intersection of the role's identity-based policy and @@ -583,16 +564,16 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // -// You can pass up to 50 session tags. The plain text session tag keys can’t +// You can pass up to 50 session tags. The plaintext session tag keys can’t // exceed 128 characters and the values can’t exceed 256 characters. For these // and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail -// for this limit even if your plain text meets the other requirements. The -// PackedPolicySize response element indicates by percentage how close the policies -// and tags for your request are to the upper size limit. +// for this limit even if your plaintext meets the other requirements. The PackedPolicySize +// response element indicates by percentage how close the policies and tags +// for your request are to the upper size limit. 
// // You can pass a session tag with the same key as a tag that is attached to // the role. When you do, the session tag overrides the role tag with the same @@ -619,7 +600,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // // Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail // logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) -// of the provided Web Identity Token. We recommend that you avoid using any +// of the provided web identity token. We recommend that you avoid using any // personally identifiable information (PII) in this field. For example, you // could instead use a GUID or a pairwise identifier, as suggested in the OIDC // specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). @@ -1108,6 +1089,70 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plaintext that you use for both inline +// and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. 
+// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// in the IAM User Guide. +// +// Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials that +// are obtained by using AWS account root user credentials have a maximum duration +// of 3,600 seconds (1 hour). +// +// Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// AWS service except the following: +// +// * You cannot call any IAM operations using the AWS CLI or the AWS API. +// +// * You cannot call any STS operations except GetCallerIdentity. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to // use as managed session policies. The plain text that you use for both inline // and managed session policies can't exceed 2,048 characters. // @@ -1338,14 +1383,15 @@ func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionToken type AssumeRoleInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) up to the maximum session duration setting for the role. - // This setting can have a value from 1 hour to 12 hours. If you specify a value - // higher than this setting, the operation fails. For example, if you specify - // a session duration of 12 hours, but your administrator set the maximum session - // duration to 6 hours, your operation fails. To learn how to view the maximum - // value for your role, see View the Maximum Session Duration Setting for a - // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // The duration, in seconds, of the role session. The value specified can + // range from 900 seconds (15 minutes) up to the maximum session duration that + // is set for the role. The maximum session duration setting can have a value + // from 1 hour to 12 hours. If you specify a value higher than this setting + // or the administrator setting (whichever is lower), the operation fails. For + // example, if you specify a session duration of 12 hours, but your administrator + // set the maximum session duration to 6 hours, your operation fails. To learn + // how to view the maximum value for your role, see View the Maximum Session + // Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide.
// // By default, the value is set to 3600 seconds. @@ -1387,17 +1433,17 @@ type AssumeRoleInput struct { // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1405,16 +1451,16 @@ type AssumeRoleInput struct { // as the role. // // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. 
The // resulting session's permissions are the intersection of the role's identity-based @@ -1459,22 +1505,41 @@ type AssumeRoleInput struct { // also include underscores or any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string"` + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in AWS CloudTrail logs to + // determine who took actions with a role. You can use the aws:SourceIdentity + // condition key to further control access to AWS resources based on the value + // of source identity. For more information about using source identity, see + // Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@-. You cannot + // use a value that begins with the text aws:. This prefix is reserved for AWS + // internal use. + SourceIdentity *string `min:"2" type:"string"` + // A list of session tags that you want to pass. Each session tag consists of // a key name and an associated value. For more information about session tags, // see Tagging AWS STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters, and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the role. When you do, session tags override a role tag with the same @@ -1495,9 +1560,10 @@ type AssumeRoleInput struct { Tags []*Tag `type:"list"` // The value provided by the MFA device, if the trust policy of the role being - // assumed requires MFA (that is, if the policy includes a condition that tests - // for MFA). If the role being assumed requires MFA and if the TokenCode value - // is missing or expired, the AssumeRole call returns an "access denied" error. + // assumed requires MFA. 
(In other words, if the policy includes a condition + // that tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. // // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. @@ -1554,6 +1620,9 @@ func (s *AssumeRoleInput) Validate() error { if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) } + if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 { + invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2)) + } if s.TokenCode != nil && len(*s.TokenCode) < 6 { invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) } @@ -1626,6 +1695,12 @@ func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput { + s.SourceIdentity = &v + return s +} + // SetTags sets the Tags field's value. func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { s.Tags = v @@ -1668,6 +1743,23 @@ type AssumeRoleOutput struct { // packed size is greater than 100 percent, which means the policies and tags // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in AWS CloudTrail logs to + // determine who took actions with a role. You can use the aws:SourceIdentity + // condition key to further control access to AWS resources based on the value + // of source identity. For more information about using source identity, see + // Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` } // String returns the string representation @@ -1698,6 +1790,12 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput { + s.SourceIdentity = &v + return s +} + type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` @@ -1736,17 +1834,17 @@ type AssumeRoleWithSAMLInput struct { // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. 
+ // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -1754,16 +1852,16 @@ type AssumeRoleWithSAMLInput struct { // as the role. // // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -1786,7 +1884,7 @@ type AssumeRoleWithSAMLInput struct { // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` - // The base-64 encoded SAML authentication response provided by the IdP. + // The base64 encoded SAML authentication response provided by the IdP. // // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) // in the IAM User Guide. @@ -1908,10 +2006,17 @@ type AssumeRoleWithSAMLOutput struct { // The value of the Issuer element of the SAML assertion. Issuer *string `type:"string"` - // A hash value based on the concatenation of the Issuer response value, the - // AWS account ID, and the friendly name (the last part of the ARN) of the SAML - // provider in IAM. 
The combination of NameQualifier and Subject can be used - // to uniquely identify a federated user. + // A hash value based on the concatenation of the following: + // + // * The Issuer response value. + // + // * The AWS account ID. + // + // * The friendly name (the last part of the ARN) of the SAML provider in + // IAM. + // + // The combination of NameQualifier and Subject can be used to uniquely identify + // a federated user. // // The following pseudocode shows how the hash value is calculated: // @@ -1925,6 +2030,26 @@ type AssumeRoleWithSAMLOutput struct { // exceeded the allowed space. PackedPolicySize *int64 `type:"integer"` + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity + // when calling AssumeRoleWithSAML. You do this by adding an attribute to the + // SAML assertion. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + // The value of the NameID element in the Subject element of the SAML assertion. Subject *string `type:"string"` @@ -1985,6 +2110,12 @@ func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithS return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { + s.SourceIdentity = &v + return s +} + // SetSubject sets the Subject field's value. func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { s.Subject = &v @@ -2032,17 +2163,17 @@ type AssumeRoleWithWebIdentityInput struct { // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). 
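The NameQualifier derivation described above can be made concrete with a short sketch: base64-encode the SHA-1 digest of the concatenated Issuer value, AWS account ID, and SAML provider friendly name. The three values below are placeholders following the shape of the AWS documentation example.

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	issuer := "https://example.com/saml" // Issuer response value
	accountID := "123456789012"          // AWS account ID
	provider := "/MySAMLIdP"             // friendly name (last part of the provider ARN)

	sum := sha1.Sum([]byte(issuer + accountID + provider))
	fmt.Println(base64.StdEncoding.EncodeToString(sum[:])) // NameQualifier
}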
It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2050,16 +2181,16 @@ type AssumeRoleWithWebIdentityInput struct { // as the role. // // This parameter is optional. You can provide up to 10 managed policy ARNs. - // However, the plain text that you use for both inline and managed session - // policies can't exceed 2,048 characters. For more information about ARNs, - // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // Passing policies to this operation returns new temporary credentials. The // resulting session's permissions are the intersection of the role's identity-based @@ -2242,6 +2373,29 @@ type AssumeRoleWithWebIdentityOutput struct { // in the AssumeRoleWithWebIdentity request. Provider *string `type:"string"` + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute associated + // with your users, like user name or email, as the source identity when calling + // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. 
To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + // The unique user identifier that is returned by the identity provider. This // identifier is associated with the WebIdentityToken that was submitted with // the AssumeRoleWithWebIdentity call. The identifier is typically unique to @@ -2291,6 +2445,12 @@ func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithW return s } +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { + s.SourceIdentity = &v + return s +} + // SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { s.SubjectFromWebIdentityToken = &v @@ -2682,17 +2842,17 @@ type GetFederationTokenInput struct { // by the policy. These permissions are granted in addition to the permissions // that are granted by the session policies. // - // The plain text that you use for both inline and managed session policies - // can't exceed 2,048 characters. The JSON policy characters can be any ASCII - // character from the space character to the end of the valid character list - // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. Policy *string `min:"1" type:"string"` // The Amazon Resource Names (ARNs) of the IAM managed policies that you want @@ -2702,7 +2862,7 @@ type GetFederationTokenInput struct { // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // to this operation. You can pass a single JSON policy document to use as an // inline session policy. 
You can also specify up to 10 managed policies to - // use as managed session policies. The plain text that you use for both inline + // use as managed session policies. The plaintext that you use for both inline // and managed session policies can't exceed 2,048 characters. You can provide // up to 10 managed policy ARNs. For more information about ARNs, see Amazon // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) @@ -2727,9 +2887,9 @@ type GetFederationTokenInput struct { // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. PolicyArns []*PolicyDescriptorType `type:"list"` // A list of session tags. Each session tag consists of a key name and an associated @@ -2737,17 +2897,17 @@ type GetFederationTokenInput struct { // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) // in the IAM User Guide. // - // This parameter is optional. You can pass up to 50 session tags. The plain - // text session tag keys can’t exceed 128 characters and the values can’t - // exceed 256 characters. For these and additional limits, see IAM and STS Character + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) // in the IAM User Guide. // // An AWS conversion compresses the passed session policies and session tags // into a packed binary format that has a separate limit. Your request can fail - // for this limit even if your plain text meets the other requirements. The - // PackedPolicySize response element indicates by percentage how close the policies - // and tags for your request are to the upper size limit. + // for this limit even if your plaintext meets the other requirements. The PackedPolicySize + // response element indicates by percentage how close the policies and tags + // for your request are to the upper size limit. // // You can pass a session tag with the same key as a tag that is already attached // to the user you are federating. 
When you do, session tags override a user diff --git a/vendor/github.com/evanw/esbuild/internal/ast/ast.go b/vendor/github.com/evanw/esbuild/internal/ast/ast.go index 9be056e..0de1a91 100644 --- a/vendor/github.com/evanw/esbuild/internal/ast/ast.go +++ b/vendor/github.com/evanw/esbuild/internal/ast/ast.go @@ -9,8 +9,11 @@ import "github.com/evanw/esbuild/internal/logger" type ImportKind uint8 const ( + // An entry point provided by the user + ImportEntryPoint ImportKind = iota + // An ES6 import or re-export statement - ImportStmt ImportKind = iota + ImportStmt // A call to "require()" ImportRequire @@ -24,11 +27,11 @@ const ( // A CSS "@import" rule ImportAt + // A CSS "@import" rule with import conditions + ImportAtConditional + // A CSS "url(...)" token ImportURL - - // An entry point provided by the user - ImportEntryPoint ) func (kind ImportKind) StringForMetafile() string { @@ -41,7 +44,7 @@ func (kind ImportKind) StringForMetafile() string { return "dynamic-import" case ImportRequireResolve: return "require-resolve" - case ImportAt: + case ImportAt, ImportAtConditional: return "import-rule" case ImportURL: return "url-token" @@ -62,7 +65,7 @@ type ImportRecord struct { // The resolved source index for an internal import (within the bundle) or // nil for an external import (not included in the bundle) - SourceIndex *uint32 + SourceIndex Index32 // Sometimes the parser creates an import record and decides it isn't needed. // For example, TypeScript code may have import statements that later turn @@ -74,19 +77,50 @@ type ImportRecord struct { // CommonJS wrapper or not. ContainsImportStar bool + // If this is true, the import contains an import for the alias "default", + // either via the "import x from" or "import {default as x} from" syntax. + ContainsDefaultAlias bool + // If true, this "export * from 'path'" statement is evaluated at run-time by - // calling the "__exportStar()" helper function - CallsRunTimeExportStarFn bool + // calling the "__reExport()" helper function + CallsRunTimeReExportFn bool // Tell the printer to wrap this call to "require()" in "__toModule(...)" WrapWithToModule bool - // True for require calls like this: "try { require() } catch {}". In this - // case we shouldn't generate an error if the path could not be resolved. - IsInsideTryBody bool + // True for the following cases: + // + // try { require('x') } catch { handle } + // try { await import('x') } catch { handle } + // try { require.resolve('x') } catch { handle } + // import('x').catch(handle) + // import('x').then(_, handle) + // + // In these cases we shouldn't generate an error if the path could not be + // resolved. + HandlesImportErrors bool // If true, this was originally written as a bare "import 'file'" statement WasOriginallyBareImport bool Kind ImportKind } + +// This stores a 32-bit index where the zero value is an invalid index. This is +// a better alternative to storing the index as a pointer since that has the +// same properties but takes up more space and costs an extra pointer traversal. 
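The Index32 type introduced just below stores the bitwise complement of an index, so the struct's zero value decodes as invalid while index 0 stays representable (only ^uint32(0) itself cannot be stored). A standalone sketch of that encoding, using a local copy since esbuild's internal/ast package is not importable from outside the module:

package main

import "fmt"

// Local copy of the Index32 encoding, for illustration only.
type Index32 struct{ flippedBits uint32 }

func MakeIndex32(index uint32) Index32 { return Index32{flippedBits: ^index} }
func (i Index32) IsValid() bool        { return i.flippedBits != 0 }
func (i Index32) GetIndex() uint32     { return ^i.flippedBits }

func main() {
	var invalid Index32 // zero value: flippedBits == 0, reads as invalid
	valid := MakeIndex32(0)
	fmt.Println(invalid.IsValid())                 // false
	fmt.Println(valid.IsValid(), valid.GetIndex()) // true 0
}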
+type Index32 struct { + flippedBits uint32 +} + +func MakeIndex32(index uint32) Index32 { + return Index32{flippedBits: ^index} +} + +func (i Index32) IsValid() bool { + return i.flippedBits != 0 +} + +func (i Index32) GetIndex() uint32 { + return ^i.flippedBits +} diff --git a/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go b/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go index 27a09a9..01b6220 100644 --- a/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go +++ b/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go @@ -2,16 +2,15 @@ package bundler import ( "bytes" - "crypto/sha1" "encoding/base32" "encoding/base64" "fmt" - "mime" "net/http" "sort" "strings" "sync" "syscall" + "time" "unicode" "unicode/utf8" @@ -19,9 +18,10 @@ import ( "github.com/evanw/esbuild/internal/cache" "github.com/evanw/esbuild/internal/compat" "github.com/evanw/esbuild/internal/config" - "github.com/evanw/esbuild/internal/css_ast" "github.com/evanw/esbuild/internal/css_parser" "github.com/evanw/esbuild/internal/fs" + "github.com/evanw/esbuild/internal/graph" + "github.com/evanw/esbuild/internal/helpers" "github.com/evanw/esbuild/internal/js_ast" "github.com/evanw/esbuild/internal/js_lexer" "github.com/evanw/esbuild/internal/js_parser" @@ -29,78 +29,17 @@ import ( "github.com/evanw/esbuild/internal/logger" "github.com/evanw/esbuild/internal/resolver" "github.com/evanw/esbuild/internal/runtime" - "github.com/evanw/esbuild/internal/sourcemap" + "github.com/evanw/esbuild/internal/xxhash" ) -type file struct { - source logger.Source - repr fileRepr - loader config.Loader - sourceMap *sourcemap.SourceMap +type scannerFile struct { + inputFile graph.InputFile pluginData interface{} - // The minimum number of links in the module graph to get from an entry point - // to this file - distanceFromEntryPoint uint32 - - // This holds all entry points that can reach this file. It will be used to - // assign the parts in this file to a chunk. - entryBits bitSet - // If "AbsMetadataFile" is present, this will be filled out with information // about this file in JSON format. This is a partial JSON file that will be // fully assembled later. - jsonMetadataChunk []byte - - // The path of this entry point relative to the lowest common ancestor - // directory containing all entry points. Note: this must have OS-independent - // path separators (i.e. '/' not '\'). - entryPointRelPath string - - // If this file ends up being used in the bundle, these are additional files - // that must be written to the output directory. It's used by the "file" - // loader. - additionalFiles []OutputFile - - isEntryPoint bool - - // If true, this file was listed as not having side effects by a package.json - // file in one of our containing directories with a "sideEffects" field. - ignoreIfUnused bool - - // This is optional additional information about "ignoreIfUnused" for errors - ignoreIfUnusedData *resolver.IgnoreIfUnusedData -} - -type fileRepr interface { - importRecords() *[]ast.ImportRecord -} - -type reprJS struct { - ast js_ast.AST - meta fileMeta - - // If present, this is the CSS file that this JavaScript stub corresponds to. - // A JavaScript stub is automatically generated for a CSS file when it's - // imported from a JavaScript file. - cssSourceIndex *uint32 -} - -func (repr *reprJS) importRecords() *[]ast.ImportRecord { - return &repr.ast.ImportRecords -} - -type reprCSS struct { - ast css_ast.AST - - // If present, this is the JavaScript stub corresponding to this CSS file. 
- // A JavaScript stub is automatically generated for a CSS file when it's - // imported from a JavaScript file. - jsSourceIndex *uint32 -} - -func (repr *reprCSS) importRecords() *[]ast.ImportRecord { - return &repr.ast.ImportRecords + jsonMetadataChunk string } // This is data related to source maps. It's computed in parallel with linking @@ -121,34 +60,39 @@ type dataForSourceMap struct { type Bundle struct { fs fs.FS res resolver.Resolver - files []file - entryPoints []uint32 + files []scannerFile + entryPoints []graph.EntryPoint } type parseArgs struct { - fs fs.FS - log logger.Log - res resolver.Resolver - caches *cache.CacheSet - keyPath logger.Path - prettyPath string - sourceIndex uint32 - importSource *logger.Source - ignoreIfUnused bool - ignoreIfUnusedData *resolver.IgnoreIfUnusedData - importPathRange logger.Range - pluginData interface{} - options config.Options - results chan parseResult - inject chan config.InjectedFile - skipResolve bool + fs fs.FS + log logger.Log + res resolver.Resolver + caches *cache.CacheSet + keyPath logger.Path + prettyPath string + sourceIndex uint32 + importSource *logger.Source + sideEffects graph.SideEffects + importPathRange logger.Range + pluginData interface{} + options config.Options + results chan parseResult + inject chan config.InjectedFile + skipResolve bool } type parseResult struct { - file file - ok bool - + file scannerFile resolveResults []*resolver.ResolveResult + tlaCheck tlaCheck + ok bool +} + +type tlaCheck struct { + parent ast.Index32 + depth uint32 + importRecordIndex uint32 } func parseFile(args parseArgs) { @@ -208,40 +152,39 @@ func parseFile(args parseArgs) { } result := parseResult{ - file: file{ - source: source, - loader: loader, + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Loader: loader, + SideEffects: args.sideEffects, + }, pluginData: pluginData, - - // Record information from "sideEffects" in "package.json" - ignoreIfUnused: args.ignoreIfUnused, - ignoreIfUnusedData: args.ignoreIfUnusedData, }, } switch loader { case config.LoaderJS: ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options)) - result.file.repr = &reprJS{ast: ast} + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = ok case config.LoaderJSX: args.options.JSX.Parse = true ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options)) - result.file.repr = &reprJS{ast: ast} + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = ok case config.LoaderTS: args.options.TS.Parse = true ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options)) - result.file.repr = &reprJS{ast: ast} + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = ok case config.LoaderTSX: args.options.TS.Parse = true args.options.JSX.Parse = true ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options)) - result.file.repr = &reprJS{ast: ast} + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = ok case config.LoaderCSS: @@ -250,14 +193,18 @@ func parseFile(args parseArgs) { RemoveWhitespace: args.options.RemoveWhitespace, UnsupportedCSSFeatures: args.options.UnsupportedCSSFeatures, }) - result.file.repr = &reprCSS{ast: ast} + result.file.inputFile.Repr = &graph.CSSRepr{AST: ast} result.ok = true case config.LoaderJSON: expr, ok := args.caches.JSONCache.Parse(args.log, source, js_parser.JSONOptions{}) ast := js_parser.LazyExportAST(args.log, source, 
js_parser.OptionsFromConfig(&args.options), expr, "") - result.file.ignoreIfUnused = true - result.file.repr = &reprJS{ast: ast} + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = ok case config.LoaderText: @@ -265,8 +212,12 @@ func parseFile(args parseArgs) { expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(source.Contents)}} ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") ast.URLForCSS = "data:text/plain;base64," + encoded - result.file.ignoreIfUnused = true - result.file.repr = &reprJS{ast: ast} + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = true case config.LoaderBase64: @@ -275,8 +226,12 @@ func parseFile(args parseArgs) { expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(encoded)}} ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") ast.URLForCSS = "data:" + mimeType + ";base64," + encoded - result.file.ignoreIfUnused = true - result.file.repr = &reprJS{ast: ast} + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = true case config.LoaderBinary: @@ -284,72 +239,98 @@ func parseFile(args parseArgs) { expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(encoded)}} ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "__toBinary") ast.URLForCSS = "data:application/octet-stream;base64," + encoded - result.file.ignoreIfUnused = true - result.file.repr = &reprJS{ast: ast} + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = true case config.LoaderDataURL: mimeType := guessMimeType(ext, source.Contents) encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents)) - url := "data:" + mimeType + ";base64," + encoded + url := fmt.Sprintf("data:%s;base64,%s", mimeType, encoded) expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(url)}} ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") ast.URLForCSS = url - result.file.ignoreIfUnused = true - result.file.repr = &reprJS{ast: ast} + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = true case config.LoaderFile: // Add a hash to the file name to prevent multiple files with the same name // but different contents from colliding - hash := hashForFileName([]byte(source.Contents)) - additionalFileName := base + "." 
+ hash + ext - publicPath := args.options.PublicPath + additionalFileName + var hash string + if config.HasPlaceholder(args.options.AssetPathTemplate, config.HashPlaceholder) { + h := xxhash.New() + h.Write([]byte(source.Contents)) + hash = hashForFileName(h.Sum(nil)) + } + dir := "/" + relPath := config.TemplateToString(config.SubstituteTemplate(args.options.AssetPathTemplate, config.PathPlaceholders{ + Dir: &dir, + Name: &base, + Hash: &hash, + })) + ext - // Determine the destination folder - targetFolder := args.options.AbsOutputDir + // Determine the final path that this asset will have in the output directory + publicPath := joinWithPublicPath(args.options.PublicPath, relPath+source.KeyPath.IgnoredSuffix) // Export the resulting relative path as a string expr := js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(publicPath)}} ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "") ast.URLForCSS = publicPath - result.file.ignoreIfUnused = true - result.file.repr = &reprJS{ast: ast} + if pluginName != "" { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin + } else { + result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData + } + result.file.inputFile.Repr = &graph.JSRepr{AST: ast} result.ok = true // Optionally add metadata about the file - var jsonMetadataChunk []byte - if args.options.AbsMetadataFile != "" { + var jsonMetadataChunk string + if args.options.NeedsMetafile { inputs := fmt.Sprintf("{\n %s: {\n \"bytesInOutput\": %d\n }\n }", js_printer.QuoteForJSON(source.PrettyPath, args.options.ASCIIOnly), len(source.Contents), ) - jsonMetadataChunk = []byte(fmt.Sprintf( + jsonMetadataChunk = fmt.Sprintf( "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": %s,\n \"bytes\": %d\n }", inputs, len(source.Contents), - )) + ) } // Copy the file using an additional file payload to make sure we only copy // the file if the module isn't removed due to tree shaking. - result.file.additionalFiles = []OutputFile{{ - AbsPath: args.fs.Join(targetFolder, additionalFileName), + result.file.inputFile.AdditionalFiles = []graph.OutputFile{{ + AbsPath: args.fs.Join(args.options.AbsOutputDir, relPath), Contents: []byte(source.Contents), - jsonMetadataChunk: jsonMetadataChunk, + JSONMetadataChunk: jsonMetadataChunk, }} default: - args.log.AddRangeError(args.importSource, args.importPathRange, - fmt.Sprintf("File could not be loaded: %s", source.PrettyPath)) + var message string + if source.KeyPath.Namespace == "file" && ext != "" { + message = fmt.Sprintf("No loader is configured for %q files: %s", ext, source.PrettyPath) + } else { + message = fmt.Sprintf("Do not know how to load path: %s", source.PrettyPath) + } + args.log.AddRangeError(args.importSource, args.importPathRange, message) } // This must come before we send on the "results" channel to avoid deadlock if args.inject != nil { var exports []string - if repr, ok := result.file.repr.(*reprJS); ok { - exports = make([]string, 0, len(repr.ast.NamedExports)) - for alias := range repr.ast.NamedExports { + if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok { + exports = make([]string, 0, len(repr.AST.NamedExports)) + for alias := range repr.AST.NamedExports { exports = append(exports, alias) } sort.Strings(exports) // Sort for determinism @@ -371,7 +352,7 @@ func parseFile(args parseArgs) { // That way the main thread isn't blocked if the resolver takes a while. 
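The file-loader branch above renders the asset's output path from a template with an optional content hash. Through esbuild's public Go API this roughly corresponds to the sketch below; it assumes an esbuild release that exposes AssetNames, and the entry point and loader mapping are placeholders.

package main

import (
	"os"

	"github.com/evanw/esbuild/pkg/api"
)

func main() {
	// Copy imported .png files with the "file" loader and name the emitted
	// assets from a [name]-[hash] template, the public-facing counterpart of
	// the AssetPathTemplate/HashPlaceholder logic above.
	result := api.Build(api.BuildOptions{
		EntryPoints: []string{"app.js"},
		Bundle:      true,
		Outdir:      "out",
		AssetNames:  "assets/[name]-[hash]",
		Loader:      map[string]api.Loader{".png": api.LoaderFile},
		Write:       true,
	})
	if len(result.Errors) > 0 {
		os.Exit(1)
	}
}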
if args.options.Mode == config.ModeBundle && !args.skipResolve { // Clone the import records because they will be mutated later - recordsPtr := result.file.repr.importRecords() + recordsPtr := result.file.inputFile.Repr.ImportRecords() records := append([]ast.ImportRecord{}, *recordsPtr...) *recordsPtr = records result.resolveResults = make([]*resolver.ResolveResult, len(records)) @@ -382,7 +363,7 @@ func parseFile(args parseArgs) { for importRecordIndex := range records { // Don't try to resolve imports that are already resolved record := &records[importRecordIndex] - if record.SourceIndex != nil { + if record.SourceIndex.IsValid() { continue } @@ -404,13 +385,15 @@ func parseFile(args parseArgs) { } // Run the resolver and log an error if the path couldn't be resolved - resolveResult, didLogError := runOnResolvePlugins( + resolveResult, didLogError, debug := runOnResolvePlugins( args.options.Plugins, args.res, args.log, args.fs, + &args.caches.FSCache, &source, record.Range, + source.KeyPath.Namespace, record.Path.Text, record.Kind, absResolveDir, @@ -421,7 +404,7 @@ func parseFile(args parseArgs) { // All "require.resolve()" imports should be external because we don't // want to waste effort traversing into them if record.Kind == ast.ImportRequireResolve { - if !record.IsInsideTryBody && (resolveResult == nil || !resolveResult.IsExternal) { + if !record.HandlesImportErrors && (resolveResult == nil || !resolveResult.IsExternal) { args.log.AddRangeWarning(&source, record.Range, fmt.Sprintf("%q should be marked as external for use with \"require.resolve\"", record.Path.Text)) } @@ -433,10 +416,15 @@ func parseFile(args parseArgs) { // external imports instead of causing errors. This matches a common // code pattern for conditionally importing a module with a graceful // fallback. 
- if !didLogError && !record.IsInsideTryBody { + if !didLogError && !record.HandlesImportErrors { hint := "" if resolver.IsPackagePath(record.Path.Text) { - hint = " (mark it as external to exclude it from the bundle)" + if record.Kind == ast.ImportRequire { + hint = ", or surround it with try/catch to handle the failure at run-time" + } else if record.Kind == ast.ImportDynamic { + hint = ", or add \".catch()\" to handle the failure at run-time" + } + hint = fmt.Sprintf(" (mark it as external to exclude it from the bundle%s)", hint) if pluginName == "" && !args.fs.IsAbs(record.Path.Text) { if query := args.res.ProbeResolvePackageAsRelative(absResolveDir, record.Path.Text, record.Kind); query != nil { hint = fmt.Sprintf(" (use %q to reference the file %q)", "./"+record.Path.Text, args.res.PrettyPath(query.PathPair.Primary)) @@ -451,8 +439,11 @@ func parseFile(args parseArgs) { if absResolveDir == "" && pluginName != "" { hint = fmt.Sprintf(" (the plugin %q didn't set a resolve directory)", pluginName) } - args.log.AddRangeError(&source, record.Range, - fmt.Sprintf("Could not resolve %q%s", record.Path.Text, hint)) + debug.LogErrorMsg(args.log, &source, record.Range, fmt.Sprintf("Could not resolve %q%s", record.Path.Text, hint)) + } else if args.log.Level <= logger.LevelDebug && !didLogError && record.HandlesImportErrors { + args.log.AddRangeDebug(&source, record.Range, + fmt.Sprintf("Importing %q was allowed even though it could not be resolved because dynamic import failures appear to be handled here", + record.Path.Text)) } continue } @@ -464,10 +455,10 @@ func parseFile(args parseArgs) { // Attempt to parse the source map if present if loader.CanHaveSourceMap() && args.options.SourceMap != config.SourceMapNone { - if repr, ok := result.file.repr.(*reprJS); ok && repr.ast.SourceMapComment.Text != "" { + if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok && repr.AST.SourceMapComment.Text != "" { if path, contents := extractSourceMapFromComment(args.log, args.fs, &args.caches.FSCache, - args.res, &source, repr.ast.SourceMapComment, absResolveDir); contents != nil { - result.file.sourceMap = js_parser.ParseSourceMap(args.log, logger.Source{ + args.res, &source, repr.AST.SourceMapComment, absResolveDir); contents != nil { + result.file.inputFile.InputSourceMap = js_parser.ParseSourceMap(args.log, logger.Source{ KeyPath: path, PrettyPath: args.res.PrettyPath(path), Contents: *contents, @@ -479,6 +470,35 @@ func parseFile(args parseArgs) { args.results <- result } +func joinWithPublicPath(publicPath string, relPath string) string { + if strings.HasPrefix(relPath, "./") { + relPath = relPath[2:] + + // Strip any amount of further no-op slashes (i.e. ".///././/x/y" => "x/y") + for { + if strings.HasPrefix(relPath, "/") { + relPath = relPath[1:] + } else if strings.HasPrefix(relPath, "./") { + relPath = relPath[2:] + } else { + break + } + } + } + + // Use a relative path if there is no public path + if publicPath == "" { + publicPath = "." 
+ } + + // Join with a slash + slash := "/" + if strings.HasSuffix(publicPath, "/") { + slash = "" + } + return fmt.Sprintf("%s%s%s", publicPath, slash, relPath) +} + func isASCIIOnly(text string) bool { for _, c := range text { if c < 0x20 || c > 0x7E { @@ -489,7 +509,7 @@ func isASCIIOnly(text string) bool { } func guessMimeType(extension string, contents string) string { - mimeType := mime.TypeByExtension(extension) + mimeType := helpers.MimeTypeByExtension(extension) if mimeType == "" { mimeType = http.DetectContentType([]byte(contents)) } @@ -507,40 +527,30 @@ func extractSourceMapFromComment( comment js_ast.Span, absResolveDir string, ) (logger.Path, *string) { - // Data URL - if strings.HasPrefix(comment.Text, "data:") { - if strings.HasPrefix(comment.Text, "data:application/json;") { - // Scan for the base64 part to support URLs like "data:application/json;charset=utf-8;base64," - if index := strings.Index(comment.Text, ";base64,"); index != -1 { - n := int32(index + len(";base64,")) - encoded := comment.Text[n:] - decoded, err := base64.StdEncoding.DecodeString(encoded) - if err != nil { - r := logger.Range{Loc: logger.Loc{Start: comment.Range.Loc.Start + n}, Len: comment.Range.Len - n} - log.AddRangeWarning(source, r, "Invalid base64 data in source map") - return logger.Path{}, nil - } - contents := string(decoded) - return logger.Path{Text: source.PrettyPath + ".sourceMappingURL"}, &contents - } + // Support data URLs + if parsed, ok := resolver.ParseDataURL(comment.Text); ok { + if contents, err := parsed.DecodeData(); err == nil { + return logger.Path{Text: source.PrettyPath, IgnoredSuffix: "#sourceMappingURL"}, &contents + } else { + log.AddRangeWarning(source, comment.Range, fmt.Sprintf("Unsupported source map comment: %s", err.Error())) + return logger.Path{}, nil } - - // Anything else is unsupported - log.AddRangeWarning(source, comment.Range, "Unsupported source map comment") - return logger.Path{}, nil } // Relative path in a file with an absolute path if absResolveDir != "" { absPath := fs.Join(absResolveDir, comment.Text) path := logger.Path{Text: absPath, Namespace: "file"} - contents, err := fsCache.ReadFile(fs, absPath) + contents, err, originalError := fsCache.ReadFile(fs, absPath) + if log.Level <= logger.LevelDebug && originalError != nil { + log.AddRangeDebug(source, comment.Range, fmt.Sprintf("Failed to read file %q: %s", res.PrettyPath(path), originalError.Error())) + } if err != nil { if err == syscall.ENOENT { // Don't report a warning because this is likely unactionable return logger.Path{}, nil } - log.AddRangeError(source, comment.Range, fmt.Sprintf("Cannot read file %q: %s", res.PrettyPath(path), err.Error())) + log.AddRangeWarning(source, comment.Range, fmt.Sprintf("Cannot read file %q: %s", res.PrettyPath(path), err.Error())) return logger.Path{}, nil } return path, &contents @@ -583,6 +593,9 @@ func logPluginMessages( } // Sanitize the locations + for _, note := range msg.Notes { + sanetizeLocation(res, note.Location) + } if msg.Data.Location == nil { msg.Data.Location = logger.LocationOrNil(importSource, importPathRange) } else { @@ -590,9 +603,10 @@ func logPluginMessages( if msg.Data.Location.File == "" && importSource != nil { msg.Data.Location.File = importSource.PrettyPath } - } - for _, note := range msg.Notes { - sanetizeLocation(res, note.Location) + if importSource != nil { + msg.Notes = append(msg.Notes, logger.RangeData(importSource, importPathRange, + fmt.Sprintf("The plugin %q was triggered by this import", name))) + } } log.AddMsg(msg) 
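A table-driven sketch of the joinWithPublicPath helper added above. The expected values follow from reading the implementation (an empty public path falls back to ".", a trailing slash is not doubled, and no-op "./" and "/" prefixes on the relative path are stripped); the test itself is hypothetical and would live alongside the function in the bundler package.

package bundler

import "testing"

func TestJoinWithPublicPath(t *testing.T) {
	cases := []struct{ publicPath, relPath, want string }{
		{"", "a/b.png", "./a/b.png"},                   // empty public path falls back to "."
		{"https://cdn", "a.png", "https://cdn/a.png"},  // joined with a single slash
		{"https://cdn/", "a.png", "https://cdn/a.png"}, // no duplicate slash
		{"x", ".///./y", "x/y"},                        // no-op "./" and "/" prefixes stripped
	}
	for _, c := range cases {
		if got := joinWithPublicPath(c.publicPath, c.relPath); got != c.want {
			t.Errorf("joinWithPublicPath(%q, %q) = %q, want %q", c.publicPath, c.relPath, got, c.want)
		}
	}
}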
@@ -623,22 +637,29 @@ func runOnResolvePlugins( res resolver.Resolver, log logger.Log, fs fs.FS, + fsCache *cache.FSCache, importSource *logger.Source, importPathRange logger.Range, + importNamespace string, path string, kind ast.ImportKind, absResolveDir string, pluginData interface{}, -) (*resolver.ResolveResult, bool) { +) (*resolver.ResolveResult, bool, resolver.DebugMeta) { resolverArgs := config.OnResolveArgs{ Path: path, ResolveDir: absResolveDir, + Kind: kind, PluginData: pluginData, } - applyPath := logger.Path{Text: path} + applyPath := logger.Path{ + Text: path, + Namespace: importNamespace, + } if importSource != nil { resolverArgs.Importer = importSource.KeyPath - applyPath.Namespace = importSource.KeyPath.Namespace + } else { + resolverArgs.Importer.Namespace = importNamespace } // Apply resolver plugins in order until one succeeds @@ -655,9 +676,17 @@ func runOnResolvePlugins( } didLogError := logPluginMessages(res, log, pluginName, result.Msgs, result.ThrownError, importSource, importPathRange) + // Plugins can also provide additional file system paths to watch + for _, file := range result.AbsWatchFiles { + fsCache.ReadFile(fs, file) + } + for _, dir := range result.AbsWatchDirs { + fs.ReadDirectory(dir) + } + // Stop now if there was an error if didLogError { - return nil, true + return nil, true, resolver.DebugMeta{} } // The "file" namespace is the default for non-external paths, but not @@ -686,21 +715,33 @@ func runOnResolvePlugins( log.AddRangeError(importSource, importPathRange, fmt.Sprintf("Plugin %q returned a non-absolute path: %s (set a namespace if this is not a file path)", pluginName, result.Path.Text)) } - return nil, true + return nil, true, resolver.DebugMeta{} } return &resolver.ResolveResult{ PathPair: resolver.PathPair{Primary: result.Path}, IsExternal: result.External, PluginData: result.PluginData, - }, false + }, false, resolver.DebugMeta{} } } // Resolve relative to the resolve directory by default. All paths in the // "file" namespace automatically have a resolve directory. Loader plugins // can also configure a custom resolve directory for files in other namespaces. 
- return res.Resolve(absResolveDir, path, kind), false + result, debug := res.Resolve(absResolveDir, path, kind) + + // Warn when the case used for importing differs from the actual file name + if result != nil && result.DifferentCase != nil && !resolver.IsInsideNodeModules(absResolveDir) { + diffCase := *result.DifferentCase + log.AddRangeWarning(importSource, importPathRange, fmt.Sprintf( + "Use %q instead of %q to avoid issues with case-sensitive file systems", + res.PrettyPath(logger.Path{Text: fs.Join(diffCase.Dir, diffCase.Actual), Namespace: "file"}), + res.PrettyPath(logger.Path{Text: fs.Join(diffCase.Dir, diffCase.Query), Namespace: "file"}), + )) + } + + return result, false, debug } type loaderPluginResult struct { @@ -741,6 +782,14 @@ func runOnLoadPlugins( } didLogError := logPluginMessages(res, log, pluginName, result.Msgs, result.ThrownError, importSource, importPathRange) + // Plugins can also provide additional file system paths to watch + for _, file := range result.AbsWatchFiles { + fsCache.ReadFile(fs, file) + } + for _, dir := range result.AbsWatchDirs { + fs.ReadDirectory(dir) + } + // Stop now if there was an error if didLogError { if isWatchMode && source.KeyPath.Namespace == "file" { @@ -781,20 +830,49 @@ func runOnLoadPlugins( // Read normal modules from disk if source.KeyPath.Namespace == "file" { - if contents, err := fsCache.ReadFile(fs, source.KeyPath.Text); err == nil { + if contents, err, originalError := fsCache.ReadFile(fs, source.KeyPath.Text); err == nil { source.Contents = contents return loaderPluginResult{ loader: config.LoaderDefault, absResolveDir: fs.Dir(source.KeyPath.Text), }, true - } else if err == syscall.ENOENT { - log.AddRangeError(importSource, importPathRange, - fmt.Sprintf("Could not read from file: %s", source.KeyPath.Text)) - return loaderPluginResult{}, false } else { - log.AddRangeError(importSource, importPathRange, - fmt.Sprintf("Cannot read file %q: %s", res.PrettyPath(source.KeyPath), err.Error())) - return loaderPluginResult{}, false + if log.Level <= logger.LevelDebug && originalError != nil { + log.AddDebug(nil, logger.Loc{}, fmt.Sprintf("Failed to read file %q: %s", source.KeyPath.Text, originalError.Error())) + } + if err == syscall.ENOENT { + log.AddRangeError(importSource, importPathRange, + fmt.Sprintf("Could not read from file: %s", source.KeyPath.Text)) + return loaderPluginResult{}, false + } else { + log.AddRangeError(importSource, importPathRange, + fmt.Sprintf("Cannot read file %q: %s", res.PrettyPath(source.KeyPath), err.Error())) + return loaderPluginResult{}, false + } + } + } + + // Native support for data URLs. 
This is supported natively by node: + // https://nodejs.org/docs/latest/api/esm.html#esm_data_imports + if source.KeyPath.Namespace == "dataurl" { + if parsed, ok := resolver.ParseDataURL(source.KeyPath.Text); ok { + if mimeType := parsed.DecodeMIMEType(); mimeType != resolver.MIMETypeUnsupported { + if contents, err := parsed.DecodeData(); err != nil { + log.AddRangeError(importSource, importPathRange, + fmt.Sprintf("Could not load data URL: %s", err.Error())) + return loaderPluginResult{loader: config.LoaderNone}, true + } else { + source.Contents = contents + switch mimeType { + case resolver.MIMETypeTextCSS: + return loaderPluginResult{loader: config.LoaderCSS}, true + case resolver.MIMETypeTextJavaScript: + return loaderPluginResult{loader: config.LoaderJS}, true + case resolver.MIMETypeApplicationJSON: + return loaderPluginResult{loader: config.LoaderJSON}, true + } + } + } } } @@ -826,9 +904,8 @@ func lowerCaseAbsPathForWindows(absPath string) string { return strings.ToLower(absPath) } -func hashForFileName(bytes []byte) string { - hashBytes := sha1.Sum(bytes) - return base32.StdEncoding.EncodeToString(hashBytes[:])[:8] +func hashForFileName(hashBytes []byte) string { + return base32.StdEncoding.EncodeToString(hashBytes)[:8] } type scanner struct { @@ -847,11 +924,27 @@ type scanner struct { remaining int } -func ScanBundle(log logger.Log, fs fs.FS, res resolver.Resolver, caches *cache.CacheSet, entryPoints []string, options config.Options) Bundle { - if options.ExtensionToLoader == nil { - options.ExtensionToLoader = DefaultExtensionToLoaderMap() +type EntryPoint struct { + InputPath string + OutputPath string + IsFile bool +} + +func ScanBundle( + log logger.Log, + fs fs.FS, + res resolver.Resolver, + caches *cache.CacheSet, + entryPoints []EntryPoint, + options config.Options, +) Bundle { + start := time.Now() + if log.Level <= logger.LevelVerbose { + log.AddVerbose(nil, logger.Loc{}, "Started the scan phase") } + applyOptionDefaults(&options) + s := scanner{ log: log, fs: fs, @@ -868,19 +961,31 @@ func ScanBundle(log logger.Log, fs fs.FS, res resolver.Resolver, caches *cache.C s.remaining++ go func() { source, ast, ok := globalRuntimeCache.parseRuntime(&options) - s.resultChannel <- parseResult{file: file{source: source, repr: &reprJS{ast: ast}}, ok: ok} + s.resultChannel <- parseResult{ + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Repr: &graph.JSRepr{AST: ast}, + }, + }, + ok: ok, + } }() s.preprocessInjectedFiles() - entryPointIndices := s.addEntryPoints(entryPoints) + entryPointMeta := s.addEntryPoints(entryPoints) s.scanAllDependencies() files := s.processScannedFiles() + if log.Level <= logger.LevelVerbose { + log.AddVerbose(nil, logger.Loc{}, fmt.Sprintf("Ended the scan phase (%dms)", time.Since(start).Milliseconds())) + } + return Bundle{ fs: fs, res: res, files: files, - entryPoints: entryPointIndices, + entryPoints: entryPointMeta, } } @@ -923,7 +1028,7 @@ func (s *scanner) maybeParseFile( } // Don't emit warnings for code inside a "node_modules" directory - if resolver.IsInsideNodeModules(s.fs, path.Text) { + if resolver.IsInsideNodeModules(path.Text) { optionsClone.SuppressWarningsAboutWeirdCode = true } @@ -934,13 +1039,22 @@ func (s *scanner) maybeParseFile( if len(resolveResult.JSXFragment) > 0 { optionsClone.JSX.Fragment = resolveResult.JSXFragment } - if resolveResult.UseDefineForClassFieldsTS { - optionsClone.UseDefineForClassFields = true + if resolveResult.UseDefineForClassFieldsTS != config.Unspecified { + 
optionsClone.UseDefineForClassFields = resolveResult.UseDefineForClassFieldsTS } if resolveResult.PreserveUnusedImportsTS { optionsClone.PreserveUnusedImportsTS = true } + // Set the module type preference using node's module type rules + if strings.HasSuffix(path.Text, ".mjs") { + optionsClone.ModuleType = config.ModuleESM + } else if strings.HasSuffix(path.Text, ".cjs") { + optionsClone.ModuleType = config.ModuleCommonJS + } else { + optionsClone.ModuleType = resolveResult.ModuleType + } + // Enable bundling for injected files so we always do tree shaking. We // never want to include unnecessary code from injected files since they // are essentially bundled. However, if we do this we should skip the @@ -952,23 +1066,39 @@ func (s *scanner) maybeParseFile( skipResolve = true } + // Special-case pretty-printed paths for data URLs + if path.Namespace == "dataurl" { + if _, ok := resolver.ParseDataURL(path.Text); ok { + prettyPath = path.Text + if len(prettyPath) > 64 { + prettyPath = prettyPath[:64] + "..." + } + prettyPath = fmt.Sprintf("<%s>", prettyPath) + } + } + + var sideEffects graph.SideEffects + if resolveResult.PrimarySideEffectsData != nil { + sideEffects.Kind = graph.NoSideEffects_PackageJSON + sideEffects.Data = resolveResult.PrimarySideEffectsData + } + go parseFile(parseArgs{ - fs: s.fs, - log: s.log, - res: s.res, - caches: s.caches, - keyPath: path, - prettyPath: prettyPath, - sourceIndex: sourceIndex, - importSource: importSource, - ignoreIfUnused: resolveResult.IgnorePrimaryIfUnused != nil, - ignoreIfUnusedData: resolveResult.IgnorePrimaryIfUnused, - importPathRange: importPathRange, - pluginData: pluginData, - options: optionsClone, - results: s.resultChannel, - inject: inject, - skipResolve: skipResolve, + fs: s.fs, + log: s.log, + res: s.res, + caches: s.caches, + keyPath: path, + prettyPath: prettyPath, + sourceIndex: sourceIndex, + importSource: importSource, + sideEffects: sideEffects, + importPathRange: importPathRange, + pluginData: pluginData, + options: optionsClone, + results: s.resultChannel, + inject: inject, + skipResolve: skipResolve, }) return sourceIndex @@ -1027,11 +1157,15 @@ func (s *scanner) preprocessInjectedFiles() { ast := js_parser.LazyExportAST(s.log, source, js_parser.OptionsFromConfig(&s.options), expr, "") result := parseResult{ ok: true, - file: file{ - source: source, - loader: config.LoaderJSON, - repr: &reprJS{ast: ast}, - ignoreIfUnused: true, + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Repr: &graph.JSRepr{AST: ast}, + Loader: config.LoaderJSON, + SideEffects: graph.SideEffects{ + Kind: graph.NoSideEffects_PureData, + }, + }, }, } @@ -1040,6 +1174,8 @@ func (s *scanner) preprocessInjectedFiles() { go func() { s.resultChannel <- result }() } + results := make([]config.InjectedFile, len(s.options.InjectAbsPaths)) + j := 0 for _, absPath := range s.options.InjectAbsPaths { prettyPath := s.res.PrettyPath(logger.Path{Text: absPath, Namespace: "file"}) lowerAbsPath := lowerCaseAbsPathForWindows(absPath) @@ -1057,26 +1193,28 @@ func (s *scanner) preprocessInjectedFiles() { continue } - i := len(injectedFiles) - injectedFiles = append(injectedFiles, config.InjectedFile{}) channel := make(chan config.InjectedFile) s.maybeParseFile(*resolveResult, prettyPath, nil, logger.Range{}, nil, inputKindNormal, channel) - // Wait for the results in parallel + // Wait for the results in parallel. The results slice is large enough so + // it is not reallocated during the computations. 
injectWaitGroup.Add(1) - go func(i int, prettyPath string, resolveResult *resolver.ResolveResult) { - injectedFiles[i] = <-channel + go func(i int) { + results[i] = <-channel injectWaitGroup.Done() - }(i, prettyPath, resolveResult) + }(j) + j++ } injectWaitGroup.Wait() + injectedFiles = append(injectedFiles, results[:j]...) + s.options.InjectedFiles = injectedFiles } -func (s *scanner) addEntryPoints(entryPoints []string) []uint32 { +func (s *scanner) addEntryPoints(entryPoints []EntryPoint) []graph.EntryPoint { // Reserve a slot for each entry point - entryPointIndices := make([]uint32, 0, len(entryPoints)+1) + entryMetas := make([]graph.EntryPoint, 0, len(entryPoints)+1) // Treat stdin as an extra entry point if stdin := s.options.Stdin; stdin != nil { @@ -1092,32 +1230,44 @@ func (s *scanner) addEntryPoints(entryPoints []string) []uint32 { } resolveResult := resolver.ResolveResult{PathPair: resolver.PathPair{Primary: stdinPath}} sourceIndex := s.maybeParseFile(resolveResult, s.res.PrettyPath(stdinPath), nil, logger.Range{}, nil, inputKindStdin, nil) - entryPointIndices = append(entryPointIndices, sourceIndex) + entryMetas = append(entryMetas, graph.EntryPoint{ + OutputPath: "stdin", + SourceIndex: sourceIndex, + }) } - // Entry point paths without a leading "./" are interpreted as package - // paths. This happens because they go through general path resolution - // like all other import paths so that plugins can run on them. Requiring - // a leading "./" for a relative path simplifies writing plugins because - // entry points aren't a special case. - // - // However, requiring a leading "./" also breaks backward compatibility - // and makes working with the CLI more difficult. So attempt to insert - // "./" automatically when needed. We don't want to unconditionally insert - // a leading "./" because the path may not be a file system path. For - // example, it may be a URL. So only insert a leading "./" when the path - // is an exact match for an existing file. + // Check each entry point ahead of time to see if it's a real file entryPointAbsResolveDir := s.fs.Cwd() - for i, path := range entryPoints { - if !s.fs.IsAbs(path) && resolver.IsPackagePath(path) { - absPath := s.fs.Join(entryPointAbsResolveDir, path) - dir := s.fs.Dir(absPath) - base := s.fs.Base(absPath) - if entries, err := s.fs.ReadDirectory(dir); err == nil { - if entry := entries[base]; entry != nil && entry.Kind(s.fs) == fs.FileEntry { - entryPoints[i] = "./" + path + for i := range entryPoints { + entryPoint := &entryPoints[i] + absPath := entryPoint.InputPath + if !s.fs.IsAbs(absPath) { + absPath = s.fs.Join(entryPointAbsResolveDir, absPath) + } + dir := s.fs.Dir(absPath) + base := s.fs.Base(absPath) + if entries, err, originalError := s.fs.ReadDirectory(dir); err == nil { + if entry, _ := entries.Get(base); entry != nil && entry.Kind(s.fs) == fs.FileEntry { + entryPoint.IsFile = true + + // Entry point paths without a leading "./" are interpreted as package + // paths. This happens because they go through general path resolution + // like all other import paths so that plugins can run on them. Requiring + // a leading "./" for a relative path simplifies writing plugins because + // entry points aren't a special case. + // + // However, requiring a leading "./" also breaks backward compatibility + // and makes working with the CLI more difficult. So attempt to insert + // "./" automatically when needed. We don't want to unconditionally insert + // a leading "./" because the path may not be a file system path. 
For + // example, it may be a URL. So only insert a leading "./" when the path + // is an exact match for an existing file. + if !s.fs.IsAbs(entryPoint.InputPath) && resolver.IsPackagePath(entryPoint.InputPath) { + entryPoint.InputPath = "./" + entryPoint.InputPath } } + } else if s.log.Level <= logger.LevelDebug && originalError != nil { + s.log.AddDebug(nil, logger.Loc{}, fmt.Sprintf("Failed to read directory %q: %s", absPath, originalError.Error())) } } @@ -1127,57 +1277,189 @@ func (s *scanner) addEntryPoints(entryPoints []string) []uint32 { entryPointResolveResults := make([]*resolver.ResolveResult, len(entryPoints)) entryPointWaitGroup := sync.WaitGroup{} entryPointWaitGroup.Add(len(entryPoints)) - for i, path := range entryPoints { - go func(i int, path string) { + for i, entryPoint := range entryPoints { + go func(i int, entryPoint EntryPoint) { + namespace := "" + if entryPoint.IsFile { + namespace = "file" + } + // Run the resolver and log an error if the path couldn't be resolved - resolveResult, didLogError := runOnResolvePlugins( + resolveResult, didLogError, debug := runOnResolvePlugins( s.options.Plugins, s.res, s.log, s.fs, + &s.caches.FSCache, nil, logger.Range{}, - path, + namespace, + entryPoint.InputPath, ast.ImportEntryPoint, entryPointAbsResolveDir, nil, ) if resolveResult != nil { if resolveResult.IsExternal { - s.log.AddError(nil, logger.Loc{}, fmt.Sprintf("The entry point %q cannot be marked as external", path)) + s.log.AddError(nil, logger.Loc{}, fmt.Sprintf("The entry point %q cannot be marked as external", entryPoint.InputPath)) } else { entryPointResolveResults[i] = resolveResult } } else if !didLogError { hint := "" - if !s.fs.IsAbs(path) { - if query := s.res.ProbeResolvePackageAsRelative(entryPointAbsResolveDir, path, ast.ImportEntryPoint); query != nil { - hint = fmt.Sprintf(" (use %q to reference the file %q)", "./"+path, s.res.PrettyPath(query.PathPair.Primary)) + if !s.fs.IsAbs(entryPoint.InputPath) { + if strings.ContainsRune(entryPoint.InputPath, '*') { + hint = " (glob syntax must be expanded first before passing the paths to esbuild)" + } else if query := s.res.ProbeResolvePackageAsRelative(entryPointAbsResolveDir, entryPoint.InputPath, ast.ImportEntryPoint); query != nil { + hint = fmt.Sprintf(" (use %q to reference the file %q)", "./"+entryPoint.InputPath, s.res.PrettyPath(query.PathPair.Primary)) } } - s.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Could not resolve %q%s", path, hint)) + debug.LogErrorMsg(s.log, nil, logger.Range{}, fmt.Sprintf("Could not resolve %q%s", entryPoint.InputPath, hint)) } entryPointWaitGroup.Done() - }(i, path) + }(i, entryPoint) } entryPointWaitGroup.Wait() // Parse all entry points that were resolved successfully - duplicateEntryPoints := make(map[uint32]bool) - for _, resolveResult := range entryPointResolveResults { + for i, resolveResult := range entryPointResolveResults { if resolveResult != nil { prettyPath := s.res.PrettyPath(resolveResult.PathPair.Primary) sourceIndex := s.maybeParseFile(*resolveResult, prettyPath, nil, logger.Range{}, resolveResult.PluginData, inputKindEntryPoint, nil) - if duplicateEntryPoints[sourceIndex] { - s.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Duplicate entry point %q", prettyPath)) - continue + outputPath := entryPoints[i].OutputPath + outputPathWasAutoGenerated := false + + // If the output path is missing, automatically generate one from the input path + if outputPath == "" { + outputPath = entryPoints[i].InputPath + windowsVolumeLabel := "" + + // The ":" character is 
invalid in file paths on Windows except when + // it's used as a volume separator. Special-case that here so volume + // labels don't break on Windows. + if s.fs.IsAbs(outputPath) && len(outputPath) >= 3 && outputPath[1] == ':' { + if c := outputPath[0]; (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') { + if c := outputPath[2]; c == '/' || c == '\\' { + windowsVolumeLabel = outputPath[:3] + outputPath = outputPath[3:] + } + } + } + + // For cross-platform robustness, do not allow characters in the output + // path that are invalid on Windows. This is especially relevant when + // the input path is something other than a file path, such as a URL. + outputPath = sanitizeFilePathForVirtualModulePath(outputPath) + if windowsVolumeLabel != "" { + outputPath = windowsVolumeLabel + outputPath + } + outputPathWasAutoGenerated = true + + // Strip the file extension from the output path if there is one so the + // "out extension" setting is used instead + if last := strings.LastIndexAny(outputPath, "/.\\"); last != -1 && outputPath[last] == '.' { + outputPath = outputPath[:last] + } } - duplicateEntryPoints[sourceIndex] = true - entryPointIndices = append(entryPointIndices, sourceIndex) + + entryMetas = append(entryMetas, graph.EntryPoint{ + OutputPath: outputPath, + SourceIndex: sourceIndex, + OutputPathWasAutoGenerated: outputPathWasAutoGenerated, + }) } } - return entryPointIndices + // Turn all automatically-generated output paths into absolute paths + for i := range entryMetas { + entryPoint := &entryMetas[i] + if entryPoint.OutputPathWasAutoGenerated && !s.fs.IsAbs(entryPoint.OutputPath) { + entryPoint.OutputPath = s.fs.Join(entryPointAbsResolveDir, entryPoint.OutputPath) + } + } + + // Automatically compute "outbase" if it wasn't provided + if s.options.AbsOutputBase == "" { + s.options.AbsOutputBase = lowestCommonAncestorDirectory(s.fs, entryMetas) + if s.options.AbsOutputBase == "" { + s.options.AbsOutputBase = entryPointAbsResolveDir + } + } + + // Turn all output paths back into relative paths, but this time relative to + // the "outbase" value we computed above + for i := range entryMetas { + entryPoint := &entryMetas[i] + if s.fs.IsAbs(entryPoint.OutputPath) { + if !entryPoint.OutputPathWasAutoGenerated { + // If an explicit absolute output path was specified, use the path + // relative to the "outdir" directory + if relPath, ok := s.fs.Rel(s.options.AbsOutputDir, entryPoint.OutputPath); ok { + entryPoint.OutputPath = relPath + } + } else { + // Otherwise if the absolute output path was derived from the input + // path, use the path relative to the "outbase" directory + if relPath, ok := s.fs.Rel(s.options.AbsOutputBase, entryPoint.OutputPath); ok { + entryPoint.OutputPath = relPath + } + } + } + } + + return entryMetas +} + +func lowestCommonAncestorDirectory(fs fs.FS, entryPoints []graph.EntryPoint) string { + // Ignore any explicitly-specified output paths + absPaths := make([]string, 0, len(entryPoints)) + for _, entryPoint := range entryPoints { + if entryPoint.OutputPathWasAutoGenerated { + absPaths = append(absPaths, entryPoint.OutputPath) + } + } + + if len(absPaths) == 0 { + return "" + } + + lowestAbsDir := fs.Dir(absPaths[0]) + + for _, absPath := range absPaths[1:] { + absDir := fs.Dir(absPath) + lastSlash := 0 + a := 0 + b := 0 + + for { + runeA, widthA := utf8.DecodeRuneInString(absDir[a:]) + runeB, widthB := utf8.DecodeRuneInString(lowestAbsDir[b:]) + boundaryA := widthA == 0 || runeA == '/' || runeA == '\\' + boundaryB := widthB == 0 || runeB == '/' || runeB == '\\' + + 
if boundaryA && boundaryB { + if widthA == 0 || widthB == 0 { + // Truncate to the smaller path if one path is a prefix of the other + lowestAbsDir = absDir[:a] + break + } else { + // Track the longest common directory so far + lastSlash = a + } + } else if boundaryA != boundaryB || unicode.ToLower(runeA) != unicode.ToLower(runeB) { + // If both paths are different at this point, stop and set the lowest so + // far to the common parent directory. Compare using a case-insensitive + // comparison to handle paths on Windows. + lowestAbsDir = absDir[:lastSlash] + break + } + + a += widthA + b += widthB + } + } + + return lowestAbsDir } func (s *scanner) scanAllDependencies() { @@ -1191,7 +1473,7 @@ func (s *scanner) scanAllDependencies() { // Don't try to resolve paths if we're not bundling if s.options.Mode == config.ModeBundle { - records := *result.file.repr.importRecords() + records := *result.file.inputFile.Repr.ImportRecords() for importRecordIndex := range records { record := &records[importRecordIndex] @@ -1204,9 +1486,9 @@ func (s *scanner) scanAllDependencies() { path := resolveResult.PathPair.Primary if !resolveResult.IsExternal { // Handle a path within the bundle - prettyPath := s.res.PrettyPath(path) - sourceIndex := s.maybeParseFile(*resolveResult, prettyPath, &result.file.source, record.Range, resolveResult.PluginData, inputKindNormal, nil) - record.SourceIndex = &sourceIndex + sourceIndex := s.maybeParseFile(*resolveResult, s.res.PrettyPath(path), + &result.file.inputFile.Source, record.Range, resolveResult.PluginData, inputKindNormal, nil) + record.SourceIndex = ast.MakeIndex32(sourceIndex) } else { // If the path to the external module is relative to the source // file, rewrite the path to be relative to the working directory @@ -1228,35 +1510,35 @@ func (s *scanner) scanAllDependencies() { } } - s.results[result.file.source.Index] = result + s.results[result.file.inputFile.Source.Index] = result } } -func (s *scanner) processScannedFiles() []file { +func (s *scanner) processScannedFiles() []scannerFile { // Now that all files have been scanned, process the final file import records for i, result := range s.results { if !result.ok { continue } - j := js_printer.Joiner{} + sb := strings.Builder{} isFirstImport := true // Begin the metadata chunk - if s.options.AbsMetadataFile != "" { - j.AddBytes(js_printer.QuoteForJSON(result.file.source.PrettyPath, s.options.ASCIIOnly)) - j.AddString(fmt.Sprintf(": {\n \"bytes\": %d,\n \"imports\": [", len(result.file.source.Contents))) + if s.options.NeedsMetafile { + sb.Write(js_printer.QuoteForJSON(result.file.inputFile.Source.PrettyPath, s.options.ASCIIOnly)) + sb.WriteString(fmt.Sprintf(": {\n \"bytes\": %d,\n \"imports\": [", len(result.file.inputFile.Source.Contents))) } // Don't try to resolve paths if we're not bundling if s.options.Mode == config.ModeBundle { - records := *result.file.repr.importRecords() + records := *result.file.inputFile.Repr.ImportRecords() for importRecordIndex := range records { record := &records[importRecordIndex] // Skip this import record if the previous resolver call failed resolveResult := result.resolveResults[importRecordIndex] - if resolveResult == nil || record.SourceIndex == nil { + if resolveResult == nil || !record.SourceIndex.IsValid() { continue } @@ -1275,43 +1557,47 @@ func (s *scanner) processScannedFiles() []file { secondaryKey.Text = lowerCaseAbsPathForWindows(secondaryKey.Text) } if secondarySourceIndex, ok := s.visited[secondaryKey]; ok { - record.SourceIndex = &secondarySourceIndex + 
record.SourceIndex = ast.MakeIndex32(secondarySourceIndex) } } // Generate metadata about each import - if s.options.AbsMetadataFile != "" { + if s.options.NeedsMetafile { if isFirstImport { isFirstImport = false - j.AddString("\n ") + sb.WriteString("\n ") } else { - j.AddString(",\n ") + sb.WriteString(",\n ") } - j.AddString(fmt.Sprintf("{\n \"path\": %s,\n \"kind\": %s\n }", - js_printer.QuoteForJSON(s.results[*record.SourceIndex].file.source.PrettyPath, s.options.ASCIIOnly), + sb.WriteString(fmt.Sprintf("{\n \"path\": %s,\n \"kind\": %s\n }", + js_printer.QuoteForJSON(s.results[record.SourceIndex.GetIndex()].file.inputFile.Source.PrettyPath, s.options.ASCIIOnly), js_printer.QuoteForJSON(record.Kind.StringForMetafile(), s.options.ASCIIOnly))) } - // Importing a JavaScript file from a CSS file is not allowed. switch record.Kind { - case ast.ImportAt: - otherFile := &s.results[*record.SourceIndex].file - if _, ok := otherFile.repr.(*reprJS); ok { - s.log.AddRangeError(&result.file.source, record.Range, - fmt.Sprintf("Cannot import %q into a CSS file", otherFile.source.PrettyPath)) + case ast.ImportAt, ast.ImportAtConditional: + // Using a JavaScript file with CSS "@import" is not allowed + otherFile := &s.results[record.SourceIndex.GetIndex()].file + if _, ok := otherFile.inputFile.Repr.(*graph.JSRepr); ok { + s.log.AddRangeError(&result.file.inputFile.Source, record.Range, + fmt.Sprintf("Cannot import %q into a CSS file", otherFile.inputFile.Source.PrettyPath)) + } else if record.Kind == ast.ImportAtConditional { + s.log.AddRangeError(&result.file.inputFile.Source, record.Range, + "Bundling with conditional \"@import\" rules is not currently supported") } case ast.ImportURL: - otherFile := &s.results[*record.SourceIndex].file - switch otherRepr := otherFile.repr.(type) { - case *reprCSS: - s.log.AddRangeError(&result.file.source, record.Range, - fmt.Sprintf("Cannot use %q as a URL", otherFile.source.PrettyPath)) + // Using a JavaScript or CSS file with CSS "url()" is not allowed + otherFile := &s.results[record.SourceIndex.GetIndex()].file + switch otherRepr := otherFile.inputFile.Repr.(type) { + case *graph.CSSRepr: + s.log.AddRangeError(&result.file.inputFile.Source, record.Range, + fmt.Sprintf("Cannot use %q as a URL", otherFile.inputFile.Source.PrettyPath)) - case *reprJS: - if otherRepr.ast.URLForCSS == "" { - s.log.AddRangeError(&result.file.source, record.Range, - fmt.Sprintf("Cannot use %q as a URL", otherFile.source.PrettyPath)) + case *graph.JSRepr: + if otherRepr.AST.URLForCSS == "" { + s.log.AddRangeError(&result.file.inputFile.Source, record.Range, + fmt.Sprintf("Cannot use %q as a URL", otherFile.inputFile.Source.PrettyPath)) } } } @@ -1319,87 +1605,185 @@ func (s *scanner) processScannedFiles() []file { // If an import from a JavaScript file targets a CSS file, generate a // JavaScript stub to ensure that JavaScript files only ever import // other JavaScript files. 
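The stub generation below memoizes the synthetic JavaScript module on css.JSSourceIndex, so however many JavaScript files import the same CSS file, only one stub is ever allocated. Reduced to its essence, assuming a simple string-keyed table (illustrative names, not esbuild's API):

    package main

    import "fmt"

    // stubTable hands out one stable synthetic index per CSS file,
    // creating it on first request and reusing it afterwards.
    type stubTable struct {
        next  int
        byCSS map[string]int
    }

    func (t *stubTable) stubFor(cssPath string) int {
        if id, ok := t.byCSS[cssPath]; ok {
            return id // already generated for an earlier import
        }
        id := t.next
        t.next++
        t.byCSS[cssPath] = id
        return id
    }

    func main() {
        t := &stubTable{byCSS: map[string]int{}}
        fmt.Println(t.stubFor("app.css"), t.stubFor("app.css")) // 0 0
    }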
- if _, ok := result.file.repr.(*reprJS); ok { - otherFile := &s.results[*record.SourceIndex].file - if css, ok := otherFile.repr.(*reprCSS); ok { + if _, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok { + otherFile := &s.results[record.SourceIndex.GetIndex()].file + if css, ok := otherFile.inputFile.Repr.(*graph.CSSRepr); ok { if s.options.WriteToStdout { - s.log.AddRangeError(&result.file.source, record.Range, - fmt.Sprintf("Cannot import %q into a JavaScript file without an output path configured", otherFile.source.PrettyPath)) - } else if css.jsSourceIndex == nil { - stubKey := otherFile.source.KeyPath + s.log.AddRangeError(&result.file.inputFile.Source, record.Range, + fmt.Sprintf("Cannot import %q into a JavaScript file without an output path configured", otherFile.inputFile.Source.PrettyPath)) + } else if !css.JSSourceIndex.IsValid() { + stubKey := otherFile.inputFile.Source.KeyPath if stubKey.Namespace == "file" { stubKey.Text = lowerCaseAbsPathForWindows(stubKey.Text) } sourceIndex := s.allocateSourceIndex(stubKey, cache.SourceIndexJSStubForCSS) source := logger.Source{ Index: sourceIndex, - PrettyPath: otherFile.source.PrettyPath, + PrettyPath: otherFile.inputFile.Source.PrettyPath, } s.results[sourceIndex] = parseResult{ - file: file{ - repr: &reprJS{ - ast: js_parser.LazyExportAST(s.log, source, - js_parser.OptionsFromConfig(&s.options), js_ast.Expr{Data: &js_ast.EObject{}}, ""), - cssSourceIndex: record.SourceIndex, + file: scannerFile{ + inputFile: graph.InputFile{ + Source: source, + Repr: &graph.JSRepr{ + AST: js_parser.LazyExportAST(s.log, source, + js_parser.OptionsFromConfig(&s.options), js_ast.Expr{Data: &js_ast.EObject{}}, ""), + CSSSourceIndex: ast.MakeIndex32(record.SourceIndex.GetIndex()), + }, }, - source: source, }, ok: true, } - css.jsSourceIndex = &sourceIndex + css.JSSourceIndex = ast.MakeIndex32(sourceIndex) } - record.SourceIndex = css.jsSourceIndex - if css.jsSourceIndex == nil { + record.SourceIndex = css.JSSourceIndex + if !css.JSSourceIndex.IsValid() { continue } } } - // Don't include this module for its side effects if it can be - // considered to have no side effects - if record.WasOriginallyBareImport && !s.options.IgnoreDCEAnnotations { - if otherFile := &s.results[*record.SourceIndex].file; otherFile.ignoreIfUnused { + // Warn about this import if it's a bare import statement without any + // imported names (i.e. a side-effect-only import) and the module has + // been marked as having no side effects. + // + // Except don't do this if this file is inside "node_modules" since + // it's a bug in the package and the user won't be able to do anything + // about it. Note that this can result in esbuild silently generating + // broken code. If this actually happens for people, it's probably worth + // re-enabling the warning about code inside "node_modules". + if record.WasOriginallyBareImport && !s.options.IgnoreDCEAnnotations && !resolver.IsInsideNodeModules(result.file.inputFile.Source.KeyPath.Text) { + if otherModule := &s.results[record.SourceIndex.GetIndex()].file.inputFile; otherModule.SideEffects.Kind != graph.HasSideEffects && + // Do not warn if this is from a plugin, since removing the import + // would cause the plugin to not run, and running a plugin is a side + // effect. 
+ otherModule.SideEffects.Kind != graph.NoSideEffects_PureData_FromPlugin { var notes []logger.MsgData - if otherFile.ignoreIfUnusedData != nil { + if data := otherModule.SideEffects.Data; data != nil { var text string - if otherFile.ignoreIfUnusedData.IsSideEffectsArrayInJSON { + if data.IsSideEffectsArrayInJSON { text = "It was excluded from the \"sideEffects\" array in the enclosing \"package.json\" file" } else { text = "\"sideEffects\" is false in the enclosing \"package.json\" file" } - notes = append(notes, logger.RangeData(otherFile.ignoreIfUnusedData.Source, otherFile.ignoreIfUnusedData.Range, text)) + notes = append(notes, logger.RangeData(data.Source, data.Range, text)) } - s.log.AddRangeWarningWithNotes(&result.file.source, record.Range, + s.log.AddRangeWarningWithNotes(&result.file.inputFile.Source, record.Range, fmt.Sprintf("Ignoring this import because %q was marked as having no side effects", - otherFile.source.PrettyPath), notes) + otherModule.Source.PrettyPath), notes) } } } } // End the metadata chunk - if s.options.AbsMetadataFile != "" { + if s.options.NeedsMetafile { if !isFirstImport { - j.AddString("\n ") + sb.WriteString("\n ") } - j.AddString("]\n }") + sb.WriteString("]\n }") } - s.results[i].file.jsonMetadataChunk = j.Done() + s.results[i].file.jsonMetadataChunk = sb.String() } // The linker operates on an array of files, so construct that now. This // can't be constructed earlier because we generate new parse results for // JavaScript stub files for CSS imports above. - files := make([]file, len(s.results)) - for i, result := range s.results { - if result.ok { - files[i] = result.file + files := make([]scannerFile, len(s.results)) + for sourceIndex := range s.results { + if result := &s.results[sourceIndex]; result.ok { + s.validateTLA(uint32(sourceIndex)) + files[sourceIndex] = result.file } } return files } +func (s *scanner) validateTLA(sourceIndex uint32) tlaCheck { + result := &s.results[sourceIndex] + + if result.ok && result.tlaCheck.depth == 0 { + if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok { + result.tlaCheck.depth = 1 + if repr.AST.TopLevelAwaitKeyword.Len > 0 { + result.tlaCheck.parent = ast.MakeIndex32(sourceIndex) + } + + for importRecordIndex, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() && (record.Kind == ast.ImportRequire || record.Kind == ast.ImportStmt) { + parent := s.validateTLA(record.SourceIndex.GetIndex()) + if !parent.parent.IsValid() { + continue + } + + // Follow any import chains + if record.Kind == ast.ImportStmt && (!result.tlaCheck.parent.IsValid() || parent.depth < result.tlaCheck.depth) { + result.tlaCheck.depth = parent.depth + 1 + result.tlaCheck.parent = record.SourceIndex + result.tlaCheck.importRecordIndex = uint32(importRecordIndex) + continue + } + + // Require of a top-level await chain is forbidden + if record.Kind == ast.ImportRequire { + var notes []logger.MsgData + var tlaPrettyPath string + otherSourceIndex := record.SourceIndex.GetIndex() + + // Build up a chain of relevant notes for all of the imports + for { + parentResult := &s.results[otherSourceIndex] + parentRepr := parentResult.file.inputFile.Repr.(*graph.JSRepr) + + if parentRepr.AST.TopLevelAwaitKeyword.Len > 0 { + tlaPrettyPath = parentResult.file.inputFile.Source.PrettyPath + notes = append(notes, logger.RangeData(&parentResult.file.inputFile.Source, parentRepr.AST.TopLevelAwaitKeyword, + fmt.Sprintf("The top-level await in %q is here", tlaPrettyPath))) + break + } + + if 
!parentResult.tlaCheck.parent.IsValid() { + notes = append(notes, logger.MsgData{Text: "unexpected invalid index"}) + break + } + + otherSourceIndex = parentResult.tlaCheck.parent.GetIndex() + + notes = append(notes, logger.RangeData(&parentResult.file.inputFile.Source, + parentRepr.AST.ImportRecords[parent.importRecordIndex].Range, + fmt.Sprintf("The file %q imports the file %q here", + parentResult.file.inputFile.Source.PrettyPath, s.results[otherSourceIndex].file.inputFile.Source.PrettyPath))) + } + + var text string + importedPrettyPath := s.results[record.SourceIndex.GetIndex()].file.inputFile.Source.PrettyPath + + if importedPrettyPath == tlaPrettyPath { + text = fmt.Sprintf("This require call is not allowed because the imported file %q contains a top-level await", + importedPrettyPath) + } else { + text = fmt.Sprintf("This require call is not allowed because the transitive dependency %q contains a top-level await", + tlaPrettyPath) + } + + s.log.AddRangeErrorWithNotes(&result.file.inputFile.Source, record.Range, text, notes) + } + } + } + + // Make sure that if we wrap this module in a closure, the closure is also + // async. This happens when you call "import()" on this module and code + // splitting is off. + if result.tlaCheck.parent.IsValid() { + repr.Meta.IsAsyncOrHasAsyncDependency = true + } + } + } + + return result.tlaCheck +} + func DefaultExtensionToLoaderMap() map[string]config.Loader { return map[string]config.Loader{ ".js": config.LoaderJS, @@ -1414,19 +1798,7 @@ func DefaultExtensionToLoaderMap() map[string]config.Loader { } } -type OutputFile struct { - AbsPath string - Contents []byte - - // If "AbsMetadataFile" is present, this will be filled out with information - // about this file in JSON format. This is a partial JSON file that will be - // fully assembled later. 
- jsonMetadataChunk []byte - - IsExecutable bool -} - -func (b *Bundle) Compile(log logger.Log, options config.Options) []OutputFile { +func applyOptionDefaults(options *config.Options) { if options.ExtensionToLoader == nil { options.ExtensionToLoader = DefaultExtensionToLoaderMap() } @@ -1437,35 +1809,66 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) []OutputFile { options.OutputExtensionCSS = ".css" } + // Configure default path templates + if len(options.EntryPathTemplate) == 0 { + options.EntryPathTemplate = []config.PathTemplate{ + {Data: "./", Placeholder: config.DirPlaceholder}, + {Data: "/", Placeholder: config.NamePlaceholder}, + } + } + if len(options.ChunkPathTemplate) == 0 { + options.ChunkPathTemplate = []config.PathTemplate{ + {Data: "./", Placeholder: config.NamePlaceholder}, + {Data: "-", Placeholder: config.HashPlaceholder}, + } + } + if len(options.AssetPathTemplate) == 0 { + options.AssetPathTemplate = []config.PathTemplate{ + {Data: "./", Placeholder: config.NamePlaceholder}, + {Data: "-", Placeholder: config.HashPlaceholder}, + } + } +} + +func (b *Bundle) Compile(log logger.Log, options config.Options) ([]graph.OutputFile, string) { + start := time.Now() + if log.Level <= logger.LevelVerbose { + log.AddVerbose(nil, logger.Loc{}, "Started the compile phase") + } + + applyOptionDefaults(&options) + // The format can't be "preserve" while bundling if options.Mode == config.ModeBundle && options.OutputFormat == config.FormatPreserve { options.OutputFormat = config.FormatESModule } - // Get the base path from the options or choose the lowest common ancestor of all entry points - allReachableFiles := findReachableFiles(b.files, b.entryPoints) - if options.AbsOutputBase == "" { - options.AbsOutputBase = b.lowestCommonAncestorDirectory(options.CodeSplitting, allReachableFiles) + files := make([]graph.InputFile, len(b.files)) + for i, file := range b.files { + files[i] = file.inputFile } + // Get the base path from the options or choose the lowest common ancestor of all entry points + allReachableFiles := findReachableFiles(files, b.entryPoints) + // Compute source map data in parallel with linking dataForSourceMaps := b.computeDataForSourceMapsInParallel(&options, allReachableFiles) - var resultGroups [][]OutputFile + var resultGroups [][]graph.OutputFile if options.CodeSplitting { // If code splitting is enabled, link all entry points together - c := newLinkerContext(&options, log, b.fs, b.res, b.files, b.entryPoints, allReachableFiles, dataForSourceMaps) - resultGroups = [][]OutputFile{c.link()} + c := newLinkerContext(&options, log, b.fs, b.res, files, b.entryPoints, allReachableFiles, dataForSourceMaps) + resultGroups = [][]graph.OutputFile{c.link()} } else { // Otherwise, link each entry point with the runtime file separately waitGroup := sync.WaitGroup{} - resultGroups = make([][]OutputFile, len(b.entryPoints)) + resultGroups = make([][]graph.OutputFile, len(b.entryPoints)) for i, entryPoint := range b.entryPoints { waitGroup.Add(1) - go func(i int, entryPoint uint32) { - entryPoints := []uint32{entryPoint} - reachableFiles := findReachableFiles(b.files, entryPoints) - c := newLinkerContext(&options, log, b.fs, b.res, b.files, entryPoints, reachableFiles, dataForSourceMaps) + go func(i int, entryPoint graph.EntryPoint) { + entryPoints := []graph.EntryPoint{entryPoint} + reachableFiles := findReachableFiles(files, entryPoints) + c := newLinkerContext(&options, log, b.fs, b.res, files, entryPoints, reachableFiles, dataForSourceMaps) resultGroups[i] = 
c.link() waitGroup.Done() }(i, entryPoint) @@ -1474,24 +1877,22 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) []OutputFile { } // Join the results in entry point order for determinism - var outputFiles []OutputFile + var outputFiles []graph.OutputFile for _, group := range resultGroups { outputFiles = append(outputFiles, group...) } // Also generate the metadata file if necessary - if options.AbsMetadataFile != "" { - outputFiles = append(outputFiles, OutputFile{ - AbsPath: options.AbsMetadataFile, - Contents: b.generateMetadataJSON(outputFiles, allReachableFiles, options.ASCIIOnly), - }) + var metafileJSON string + if options.NeedsMetafile { + metafileJSON = b.generateMetadataJSON(outputFiles, allReachableFiles, options.ASCIIOnly) } if !options.WriteToStdout { // Make sure an output file never overwrites an input file sourceAbsPaths := make(map[string]uint32) for _, sourceIndex := range allReachableFiles { - keyPath := b.files[sourceIndex].source.KeyPath + keyPath := b.files[sourceIndex].inputFile.Source.KeyPath if keyPath.Namespace == "file" { lowerAbsPath := lowerCaseAbsPathForWindows(keyPath.Text) sourceAbsPaths[lowerAbsPath] = sourceIndex @@ -1500,7 +1901,7 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) []OutputFile { for _, outputFile := range outputFiles { lowerAbsPath := lowerCaseAbsPathForWindows(outputFile.AbsPath) if sourceIndex, ok := sourceAbsPaths[lowerAbsPath]; ok { - log.AddError(nil, logger.Loc{}, "Refusing to overwrite input file: "+b.files[sourceIndex].source.PrettyPath) + log.AddError(nil, logger.Loc{}, "Refusing to overwrite input file: "+b.files[sourceIndex].inputFile.Source.PrettyPath) } } @@ -1539,7 +1940,50 @@ func (b *Bundle) Compile(log logger.Log, options config.Options) []OutputFile { outputFiles = outputFiles[:end] } - return outputFiles + if log.Level <= logger.LevelVerbose { + log.AddVerbose(nil, logger.Loc{}, fmt.Sprintf("Ended the compile phase (%dms)", time.Since(start).Milliseconds())) + } + + return outputFiles, metafileJSON +} + +// Find all files reachable from all entry points. This order should be +// deterministic given that the entry point order is deterministic, since the +// returned order is the postorder of the graph traversal and import record +// order within a given file is deterministic. 
+func findReachableFiles(files []graph.InputFile, entryPoints []graph.EntryPoint) []uint32 { + visited := make(map[uint32]bool) + var order []uint32 + var visit func(uint32) + + // Include this file and all files it imports + visit = func(sourceIndex uint32) { + if !visited[sourceIndex] { + visited[sourceIndex] = true + file := &files[sourceIndex] + if repr, ok := file.Repr.(*graph.JSRepr); ok && repr.CSSSourceIndex.IsValid() { + visit(repr.CSSSourceIndex.GetIndex()) + } + for _, record := range *file.Repr.ImportRecords() { + if record.SourceIndex.IsValid() { + visit(record.SourceIndex.GetIndex()) + } + } + + // Each file must come after its dependencies + order = append(order, sourceIndex) + } + } + + // The runtime is always included in case it's needed + visit(runtime.SourceIndex) + + // Include all files reachable from any entry point + for _, entryPoint := range entryPoints { + visit(entryPoint.SourceIndex) + } + + return order } // This is done in parallel with linking because linking is a mostly serial @@ -1563,17 +2007,17 @@ func (b *Bundle) computeDataForSourceMapsInParallel(options *config.Options, rea results := make([]dataForSourceMap, len(b.files)) for _, sourceIndex := range reachableFiles { - if f := &b.files[sourceIndex]; f.loader.CanHaveSourceMap() { - if repr, ok := f.repr.(*reprJS); ok { + if f := &b.files[sourceIndex]; f.inputFile.Loader.CanHaveSourceMap() { + if repr, ok := f.inputFile.Repr.(*graph.JSRepr); ok { waitGroup.Add(1) - go func(sourceIndex uint32, f *file, repr *reprJS) { + go func(sourceIndex uint32, f *scannerFile, repr *graph.JSRepr) { result := &results[sourceIndex] - result.lineOffsetTables = js_printer.GenerateLineOffsetTables(f.source.Contents, repr.ast.ApproximateLineCount) - sm := f.sourceMap + result.lineOffsetTables = js_printer.GenerateLineOffsetTables(f.inputFile.Source.Contents, repr.AST.ApproximateLineCount) + sm := f.inputFile.InputSourceMap if !options.ExcludeSourcesContent { if sm == nil { // Simple case: no nested source map - result.quotedContents = [][]byte{js_printer.QuoteForJSON(f.source.Contents, options.ASCIIOnly)} + result.quotedContents = [][]byte{js_printer.QuoteForJSON(f.inputFile.Source.Contents, options.ASCIIOnly)} } else { // Complex case: nested source map result.quotedContents = make([][]byte, len(sm.Sources)) @@ -1608,80 +2052,9 @@ func (b *Bundle) computeDataForSourceMapsInParallel(options *config.Options, rea } } -func (b *Bundle) lowestCommonAncestorDirectory(codeSplitting bool, allReachableFiles []uint32) string { - isEntryPoint := make(map[uint32]bool) - for _, entryPoint := range b.entryPoints { - isEntryPoint[entryPoint] = true - } - - // If code splitting is enabled, also treat dynamic imports as entry points - if codeSplitting { - for _, sourceIndex := range allReachableFiles { - if repr, ok := b.files[sourceIndex].repr.(*reprJS); ok { - for importRecordIndex := range repr.ast.ImportRecords { - if record := &repr.ast.ImportRecords[importRecordIndex]; record.SourceIndex != nil && record.Kind == ast.ImportDynamic { - isEntryPoint[*record.SourceIndex] = true - } - } - } - } - } - - // Ignore any paths for virtual modules (that don't exist on the file system) - absPaths := make([]string, 0, len(isEntryPoint)) - for entryPoint := range isEntryPoint { - keyPath := b.files[entryPoint].source.KeyPath - if keyPath.Namespace == "file" { - absPaths = append(absPaths, keyPath.Text) - } - } - - if len(absPaths) == 0 { - return "" - } - - lowestAbsDir := b.fs.Dir(absPaths[0]) - - for _, absPath := range absPaths[1:] { - absDir 
:= b.fs.Dir(absPath) - lastSlash := 0 - a := 0 - b := 0 - - for { - runeA, widthA := utf8.DecodeRuneInString(absDir[a:]) - runeB, widthB := utf8.DecodeRuneInString(lowestAbsDir[b:]) - boundaryA := widthA == 0 || runeA == '/' || runeA == '\\' - boundaryB := widthB == 0 || runeB == '/' || runeB == '\\' - - if boundaryA && boundaryB { - if widthA == 0 || widthB == 0 { - // Truncate to the smaller path if one path is a prefix of the other - lowestAbsDir = absDir[:a] - break - } else { - // Track the longest common directory so far - lastSlash = a - } - } else if boundaryA != boundaryB || unicode.ToLower(runeA) != unicode.ToLower(runeB) { - // If both paths are different at this point, stop and set the lowest so - // far to the common parent directory. Compare using a case-insensitive - // comparison to handle paths on Windows. - lowestAbsDir = absDir[:lastSlash] - break - } - - a += widthA - b += widthB - } - } - - return lowestAbsDir -} - -func (b *Bundle) generateMetadataJSON(results []OutputFile, allReachableFiles []uint32, asciiOnly bool) []byte { - j := js_printer.Joiner{} - j.AddString("{\n \"inputs\": {") +func (b *Bundle) generateMetadataJSON(results []graph.OutputFile, allReachableFiles []uint32, asciiOnly bool) string { + sb := strings.Builder{} + sb.WriteString("{\n \"inputs\": {") // Write inputs isFirst := true @@ -1692,21 +2065,21 @@ func (b *Bundle) generateMetadataJSON(results []OutputFile, allReachableFiles [] if file := &b.files[sourceIndex]; len(file.jsonMetadataChunk) > 0 { if isFirst { isFirst = false - j.AddString("\n ") + sb.WriteString("\n ") } else { - j.AddString(",\n ") + sb.WriteString(",\n ") } - j.AddBytes(file.jsonMetadataChunk) + sb.WriteString(file.jsonMetadataChunk) } } - j.AddString("\n },\n \"outputs\": {") + sb.WriteString("\n },\n \"outputs\": {") // Write outputs isFirst = true paths := make(map[string]bool) for _, result := range results { - if len(result.jsonMetadataChunk) > 0 { + if len(result.JSONMetadataChunk) > 0 { path := b.res.PrettyPath(logger.Path{Text: result.AbsPath, Namespace: "file"}) if paths[path] { // Don't write out the same path twice (can happen with the "file" loader) @@ -1714,18 +2087,18 @@ func (b *Bundle) generateMetadataJSON(results []OutputFile, allReachableFiles [] } if isFirst { isFirst = false - j.AddString("\n ") + sb.WriteString("\n ") } else { - j.AddString(",\n ") + sb.WriteString(",\n ") } paths[path] = true - j.AddString(fmt.Sprintf("%s: ", js_printer.QuoteForJSON(path, asciiOnly))) - j.AddBytes(result.jsonMetadataChunk) + sb.WriteString(fmt.Sprintf("%s: ", js_printer.QuoteForJSON(path, asciiOnly))) + sb.WriteString(result.JSONMetadataChunk) } } - j.AddString("\n }\n}\n") - return j.Done() + sb.WriteString("\n }\n}\n") + return sb.String() } type runtimeCacheKey struct { diff --git a/vendor/github.com/evanw/esbuild/internal/bundler/debug.go b/vendor/github.com/evanw/esbuild/internal/bundler/debug.go new file mode 100644 index 0000000..a51ee81 --- /dev/null +++ b/vendor/github.com/evanw/esbuild/internal/bundler/debug.go @@ -0,0 +1,132 @@ +package bundler + +import ( + "fmt" + "strings" + + "github.com/evanw/esbuild/internal/ast" + "github.com/evanw/esbuild/internal/graph" + "github.com/evanw/esbuild/internal/js_ast" + "github.com/evanw/esbuild/internal/js_printer" +) + +// Set this to true and then load the resulting metafile in "graph-debugger.html" +// to debug graph information. +// +// This is deliberately not exposed in the final binary. It is *very* internal +// and only exists to help debug esbuild itself. 
Make sure this is always set +// back to false before committing. +const debugVerboseMetafile = false + +func (c *linkerContext) generateExtraDataForFileJS(sourceIndex uint32) string { + if !debugVerboseMetafile { + return "" + } + + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + sb := strings.Builder{} + + quoteSym := func(ref js_ast.Ref) string { + name := fmt.Sprintf("%d:%d [%s]", ref.SourceIndex, ref.InnerIndex, c.graph.Symbols.Get(ref).OriginalName) + return string(js_printer.QuoteForJSON(name, c.options.ASCIIOnly)) + } + + sb.WriteString(`,"parts":[`) + for partIndex, part := range repr.AST.Parts { + if partIndex > 0 { + sb.WriteByte(',') + } + var isFirst bool + code := "" + + sb.WriteString(fmt.Sprintf(`{"isLive":%v`, part.IsLive)) + sb.WriteString(fmt.Sprintf(`,"canBeRemovedIfUnused":%v`, part.CanBeRemovedIfUnused)) + + if partIndex == int(repr.Meta.NSExportPartIndex) { + sb.WriteString(`,"nsExportPartIndex":true`) + } else if ast.MakeIndex32(uint32(partIndex)) == repr.Meta.WrapperPartIndex { + sb.WriteString(`,"wrapperPartIndex":true`) + } else if len(part.Stmts) > 0 { + start := part.Stmts[0].Loc.Start + end := len(file.InputFile.Source.Contents) + if partIndex+1 < len(repr.AST.Parts) { + if nextStmts := repr.AST.Parts[partIndex+1].Stmts; len(nextStmts) > 0 { + if nextStart := nextStmts[0].Loc.Start; nextStart >= start { + end = int(nextStart) + } + } + } + code = file.InputFile.Source.Contents[start:end] + } + + // importRecords + sb.WriteString(`,"importRecords":[`) + isFirst = true + for _, importRecordIndex := range part.ImportRecordIndices { + record := repr.AST.ImportRecords[importRecordIndex] + if !record.SourceIndex.IsValid() { + continue + } + if isFirst { + isFirst = false + } else { + sb.WriteByte(',') + } + path := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Source.PrettyPath + sb.WriteString(fmt.Sprintf(`{"source":%s}`, js_printer.QuoteForJSON(path, c.options.ASCIIOnly))) + } + sb.WriteByte(']') + + // declaredSymbols + sb.WriteString(`,"declaredSymbols":[`) + isFirst = true + for _, declSym := range part.DeclaredSymbols { + if !declSym.IsTopLevel { + continue + } + if isFirst { + isFirst = false + } else { + sb.WriteByte(',') + } + sb.WriteString(fmt.Sprintf(`{"name":%s}`, quoteSym(declSym.Ref))) + } + sb.WriteByte(']') + + // symbolUses + sb.WriteString(`,"symbolUses":[`) + isFirst = true + for ref, uses := range part.SymbolUses { + if isFirst { + isFirst = false + } else { + sb.WriteByte(',') + } + sb.WriteString(fmt.Sprintf(`{"name":%s,"countEstimate":%d}`, quoteSym(ref), uses.CountEstimate)) + } + sb.WriteByte(']') + + // dependencies + sb.WriteString(`,"dependencies":[`) + for i, dep := range part.Dependencies { + if i > 0 { + sb.WriteByte(',') + } + sb.WriteString(fmt.Sprintf(`{"source":%s,"partIndex":%d}`, + js_printer.QuoteForJSON(c.graph.Files[dep.SourceIndex].InputFile.Source.PrettyPath, c.options.ASCIIOnly), + dep.PartIndex, + )) + } + sb.WriteByte(']') + + // code + sb.WriteString(`,"code":`) + sb.Write(js_printer.QuoteForJSON(strings.TrimRight(code, "\n"), c.options.ASCIIOnly)) + + sb.WriteByte('}') + } + sb.WriteString(`]`) + + return sb.String() +} diff --git a/vendor/github.com/evanw/esbuild/internal/bundler/linker.go b/vendor/github.com/evanw/esbuild/internal/bundler/linker.go index 5de3efe..9d0cd52 100644 --- a/vendor/github.com/evanw/esbuild/internal/bundler/linker.go +++ b/vendor/github.com/evanw/esbuild/internal/bundler/linker.go @@ -3,11 +3,14 @@ package bundler import ( "bytes" "encoding/base64" 
+ "encoding/binary" "fmt" - "path" + "hash" + "math/rand" "sort" "strings" "sync" + "time" "github.com/evanw/esbuild/internal/ast" "github.com/evanw/esbuild/internal/compat" @@ -15,6 +18,8 @@ import ( "github.com/evanw/esbuild/internal/css_ast" "github.com/evanw/esbuild/internal/css_printer" "github.com/evanw/esbuild/internal/fs" + "github.com/evanw/esbuild/internal/graph" + "github.com/evanw/esbuild/internal/helpers" "github.com/evanw/esbuild/internal/js_ast" "github.com/evanw/esbuild/internal/js_lexer" "github.com/evanw/esbuild/internal/js_printer" @@ -23,65 +28,19 @@ import ( "github.com/evanw/esbuild/internal/resolver" "github.com/evanw/esbuild/internal/runtime" "github.com/evanw/esbuild/internal/sourcemap" + "github.com/evanw/esbuild/internal/xxhash" ) -type bitSet struct { - entries []byte -} - -func newBitSet(bitCount uint) bitSet { - return bitSet{make([]byte, (bitCount+7)/8)} -} - -func (bs bitSet) hasBit(bit uint) bool { - return (bs.entries[bit/8] & (1 << (bit & 7))) != 0 -} - -func (bs bitSet) setBit(bit uint) { - bs.entries[bit/8] |= 1 << (bit & 7) -} - -func (bs bitSet) equals(other bitSet) bool { - return bytes.Equal(bs.entries, other.entries) -} - -func (bs bitSet) copyFrom(other bitSet) { - copy(bs.entries, other.entries) -} - -func (bs *bitSet) bitwiseOrWith(other bitSet) { - for i := range bs.entries { - bs.entries[i] |= other.entries[i] - } -} - type linkerContext struct { - options *config.Options - log logger.Log - fs fs.FS - res resolver.Resolver - symbols js_ast.SymbolMap - entryPoints []uint32 - files []file - hasErrors bool + options *config.Options + log logger.Log + fs fs.FS + res resolver.Resolver + graph graph.LinkerGraph // This helps avoid an infinite loop when matching imports to exports cycleDetector []importTracker - // We should avoid traversing all files in the bundle, because the linker - // should be able to run a linking operation on a large bundle where only - // a few files are needed (e.g. an incremental compilation scenario). This - // holds all files that could possibly be reached through the entry points. - // If you need to iterate over all files in the linking operation, iterate - // over this array. This array is also sorted in a deterministic ordering - // to help ensure deterministic builds (source indices are random). - reachableFiles []uint32 - - // This maps from unstable source index to stable reachable file index. This - // is useful as a deterministic key for sorting if you need to sort something - // containing a source index (such as "js_ast.Ref" symbol references). - stableSourceIndices []uint32 - // We may need to refer to the CommonJS "module" symbol for exports unboundModuleRef js_ast.Ref @@ -89,180 +48,13 @@ type linkerContext struct { // Calling this will block until the computation is done. The resulting value // is shared between threads and must be treated as immutable. dataForSourceMaps func() []dataForSourceMap -} -// This contains linker-specific metadata corresponding to a "file" struct -// from the initial scan phase of the bundler. It's separated out because it's -// conceptually only used for a single linking operation and because multiple -// linking operations may be happening in parallel with different metadata for -// the same file. -type fileMeta struct { - partMeta []partMeta - - // This is the index to the automatically-generated part containing code that - // calls "__export(exports, { ... getters ... })". 
This is used to generate - // getters on an exports object for ES6 export statements, and is both for - // ES6 star imports and CommonJS-style modules. - nsExportPartIndex uint32 - - // The index of the automatically-generated part containing export statements - // for every export in the entry point. This also contains the call to the - // require wrapper for CommonJS-style entry points. - entryPointExportPartIndex *uint32 - - // This is only for TypeScript files. If an import symbol is in this map, it - // means the import couldn't be found and doesn't actually exist. This is not - // an error in TypeScript because the import is probably just a type. - // - // Normally we remove all unused imports for TypeScript files during parsing, - // which automatically removes type-only imports. But there are certain re- - // export situations where it's impossible to tell if an import is a type or - // not: - // - // import {typeOrNotTypeWhoKnows} from 'path'; - // export {typeOrNotTypeWhoKnows}; - // - // Really people should be using the TypeScript "isolatedModules" flag with - // bundlers like this one that compile TypeScript files independently without - // type checking. That causes the TypeScript type checker to emit the error - // "Re-exporting a type when the '--isolatedModules' flag is provided requires - // using 'export type'." But we try to be robust to such code anyway. - isProbablyTypeScriptType map[js_ast.Ref]bool - - // Imports are matched with exports in a separate pass from when the matched - // exports are actually bound to the imports. Here "binding" means adding non- - // local dependencies on the parts in the exporting file that declare the - // exported symbol to all parts in the importing file that use the imported - // symbol. - // - // This must be a separate pass because of the "probably TypeScript type" - // check above. We can't generate the part for the export namespace until - // we've matched imports with exports because the generated code must omit - // type-only imports in the export namespace code. And we can't bind exports - // to imports until the part for the export namespace is generated since that - // part needs to participate in the binding. - // - // This array holds the deferred imports to bind so the pass can be split - // into two separate passes. - importsToBind map[js_ast.Ref]importToBind - - // If true, the module must be bundled CommonJS-style like this: - // - // // foo.ts - // let require_foo = __commonJS((exports, module) => { - // ... - // }); - // - // // bar.ts - // let foo = flag ? require_foo() : null; - // - cjsWrap bool - - // If true, all exports must be reached via property accesses off a call to - // the CommonJS wrapper for this module. In addition, all ES6 exports for - // this module must be added as getters to the CommonJS "exports" object. - cjsStyleExports bool - - // If true, the "__export(exports, { ... })" call will be force-included even - // if there are no parts that reference "exports". Otherwise this call will - // be removed due to the tree shaking pass. This is used when for entry point - // files when code related to the current output format needs to reference - // the "exports" variable. - forceIncludeExportsForEntryPoint bool - - // This is set when we need to pull in the "__export" symbol in to the part - // at "nsExportPartIndex". This can't be done in "createExportsForFile" - // because of concurrent map hazards. Instead, it must be done later. 
- needsExportSymbolFromRuntime bool - needsMarkAsModuleSymbolFromRuntime bool - - // The index of the automatically-generated part used to represent the - // CommonJS wrapper. This part is empty and is only useful for tree shaking - // and code splitting. The CommonJS wrapper can't be inserted into the part - // because the wrapper contains other parts, which can't be represented by - // the current part system. - cjsWrapperPartIndex *uint32 - - // This includes both named exports and re-exports. - // - // Named exports come from explicit export statements in the original file, - // and are copied from the "NamedExports" field in the AST. - // - // Re-exports come from other files and are the result of resolving export - // star statements (i.e. "export * from 'foo'"). - resolvedExports map[string]exportData - - // Never iterate over "resolvedExports" directly. Instead, iterate over this - // array. Some exports in that map aren't meant to end up in generated code. - // This array excludes these exports and is also sorted, which avoids non- - // determinism due to random map iteration order. - sortedAndFilteredExportAliases []string -} - -type importToBind struct { - sourceIndex uint32 - nameLoc logger.Loc // Optional, goes with sourceIndex, ignore if zero - ref js_ast.Ref -} - -type exportData struct { - ref js_ast.Ref - - // Export star resolution happens first before import resolution. That means - // it cannot yet determine if duplicate names from export star resolution are - // ambiguous (point to different symbols) or not (point to the same symbol). - // This issue can happen in the following scenario: - // - // // entry.js - // export * from './a' - // export * from './b' - // - // // a.js - // export * from './c' - // - // // b.js - // export {x} from './c' - // - // // c.js - // export let x = 1, y = 2 - // - // In this case "entry.js" should have two exports "x" and "y", neither of - // which are ambiguous. To handle this case, ambiguity resolution must be - // deferred until import resolution time. That is done using this array. - potentiallyAmbiguousExportStarRefs []importToBind - - // This is the file that the named export above came from. This will be - // different from the file that contains this object if this is a re-export. - sourceIndex uint32 - nameLoc logger.Loc // Optional, goes with sourceIndex, ignore if zero -} - -// This contains linker-specific metadata corresponding to a "js_ast.Part" struct -// from the initial scan phase of the bundler. It's separated out because it's -// conceptually only used for a single linking operation and because multiple -// linking operations may be happening in parallel with different metadata for -// the same part in the same file. -type partMeta struct { - // This holds all entry points that can reach this part. It will be used to - // assign this part to a chunk. - entryBits bitSet - - // If present, this is a circular doubly-linked list of all other parts in - // this file that need to be in the same chunk as this part to avoid cross- - // chunk assignments, which are not allowed in ES6 modules. - // - // This used to be an array but that was generating lots of allocations. - // Changing this to a circular doubly-linked list was a substantial speedup. - prevSibling uint32 - nextSibling uint32 - - // These are dependencies that come from other files via import statements. 
- nonLocalDependencies []partRef -} - -type partRef struct { - sourceIndex uint32 - partIndex uint32 + // The unique key prefix is a random string that is unique to every linking + // operation. It is used as a prefix for the unique keys assigned to every + // chunk. These unique keys are used to identify each chunk before the final + // output paths have been computed. + uniqueKeyPrefix string + uniqueKeyPrefixBytes []byte // This is just "uniqueKeyPrefix" in byte form } type partRange struct { @@ -272,42 +64,75 @@ type partRange struct { } type chunkInfo struct { - // The path of this chunk's directory relative to the output directory. Note: - // this must have OS-independent path separators (i.e. '/' not '\'). - relDir string - - // The name of this chunk. This is initially empty for non-entry point chunks - // because the file name contains a hash of the file contents, which haven't - // been generated yet. Don't access this directly. Instead call "relPath()" - // which first checks that the base name is not empty. - baseNameOrEmpty string + // This is a random string and is used to represent the output path of this + // chunk before the final output path has been computed. + uniqueKey string filesWithPartsInChunk map[uint32]bool filesInChunkInOrder []uint32 partsInChunkInOrder []partRange - entryBits bitSet + entryBits helpers.BitSet // This information is only useful if "isEntryPoint" is true isEntryPoint bool sourceIndex uint32 // An index into "c.sources" - entryPointBit uint // An index into "c.entryPoints" + entryPointBit uint // An index into "c.graph.EntryPoints" // For code splitting - crossChunkImports []uint32 + crossChunkImports []chunkImport // This is the representation-specific information - repr chunkRepr + chunkRepr chunkRepr + + // This is the final path of this chunk relative to the output directory, but + // without the substitution of the final hash (since it hasn't been computed). + finalTemplate []config.PathTemplate + + // This is the final path of this chunk relative to the output directory. It + // is the substitution of the final hash into "finalTemplate". + finalRelPath string + + // When this chunk is initially generated in isolation, the output pieces + // will contain slices of the output with the unique keys of other chunks + // omitted. + outputPieces []outputPiece + + // This contains the hash for just this chunk without including information + // from the hashes of other chunks. Later on in the linking process, the + // final hash for this chunk will be constructed by merging the isolated + // hashes of all transitive dependencies of this chunk. This is separated + // into two phases like this to handle cycles in the chunk import graph. + waitForIsolatedHash func() []byte + + // Other fields relating to the output file for this chunk + jsonMetadataChunkCallback func(finalOutputSize int) []byte + outputSourceMap sourcemap.SourceMapPieces + isExecutable bool } -type generateContinue struct { - crossChunkImportRecords []ast.ImportRecord - crossChunkAbsPaths []string +type chunkImport struct { + chunkIndex uint32 + importKind ast.ImportKind } -type chunkRepr interface { - generate(c *linkerContext, chunk *chunkInfo) func(generateContinue) []OutputFile +// This is a chunk of source code followed by a reference to another chunk. For +// example, the file "@import 'CHUNK0001'; body { color: black; }" would be +// represented by two pieces, one with the data "@import '" and another with the +// data "'; body { color: black; }". 
The first would have the chunk index 1 and +// the second would have an invalid chunk index. +type outputPiece struct { + data []byte + + // Note: This may be invalid. For example, the chunk may not contain any + // imports, in which case there is one piece with data and no chunk index. + chunkIndex ast.Index32 } +type chunkRepr interface{ isChunk() } + +func (*chunkReprJS) isChunk() {} +func (*chunkReprCSS) isChunk() {} + type chunkReprJS struct { // For code splitting crossChunkPrefixStmts []js_ast.Stmt @@ -319,13 +144,30 @@ type chunkReprJS struct { type chunkReprCSS struct { } -// Returns the path of this chunk relative to the output directory. Note: -// this must have OS-independent path separators (i.e. '/' not '\'). -func (chunk *chunkInfo) relPath() string { - if chunk.baseNameOrEmpty == "" { - panic("Internal error") +// Returns a log where "log.HasErrors()" only returns true if any errors have +// been logged since this call. This is useful when there have already been +// errors logged by other linkers that share the same log. +func wrappedLog(log logger.Log) logger.Log { + var mutex sync.Mutex + var hasErrors bool + addMsg := log.AddMsg + + log.AddMsg = func(msg logger.Msg) { + if msg.Kind == logger.Error { + mutex.Lock() + defer mutex.Unlock() + hasErrors = true + } + addMsg(msg) } - return path.Join(chunk.relDir, chunk.baseNameOrEmpty) + + log.HasErrors = func() bool { + mutex.Lock() + defer mutex.Unlock() + return hasErrors + } + + return log } func newLinkerContext( @@ -333,157 +175,52 @@ func newLinkerContext( log logger.Log, fs fs.FS, res resolver.Resolver, - files []file, - entryPoints []uint32, + inputFiles []graph.InputFile, + entryPoints []graph.EntryPoint, reachableFiles []uint32, dataForSourceMaps func() []dataForSourceMap, ) linkerContext { - // Clone information about symbols and files so we don't mutate the input data + log = wrappedLog(log) + c := linkerContext{ options: options, log: log, fs: fs, res: res, - entryPoints: append([]uint32{}, entryPoints...), - files: make([]file, len(files)), - symbols: js_ast.NewSymbolMap(len(files)), - reachableFiles: reachableFiles, dataForSourceMaps: dataForSourceMaps, + graph: graph.MakeLinkerGraph( + inputFiles, + reachableFiles, + entryPoints, + options.CodeSplitting, + ), } - // Clone various things since we may mutate them later - for _, sourceIndex := range c.reachableFiles { - file := files[sourceIndex] - - switch repr := file.repr.(type) { - case *reprJS: - // Clone the representation - { - clone := *repr - repr = &clone - file.repr = repr - } - - // Clone the symbol map - fileSymbols := append([]js_ast.Symbol{}, repr.ast.Symbols...) - c.symbols.Outer[sourceIndex] = fileSymbols - repr.ast.Symbols = nil - - // Clone the parts - repr.ast.Parts = append([]js_ast.Part{}, repr.ast.Parts...) - for i, part := range repr.ast.Parts { - clone := make(map[js_ast.Ref]js_ast.SymbolUse, len(part.SymbolUses)) - for ref, uses := range part.SymbolUses { - clone[ref] = uses - } - repr.ast.Parts[i].SymbolUses = clone - } - - // Clone the import records - repr.ast.ImportRecords = append([]ast.ImportRecord{}, repr.ast.ImportRecords...) 
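This clone block is deleted here (the copies are presumably built inside graph.MakeLinkerGraph now, judging by the call above), but the discipline it encodes still matters: several linking operations may run in parallel over the same scan results, so every mutable container gets a fresh backing array or hash table before the linker touches it. A reduced sketch of that shallow-clone discipline, with illustrative types:

    package main

    import "fmt"

    // cloneForLink copies the mutable containers so one linking operation
    // can mutate its view without aliasing another's. Element values are
    // still shared, which is safe as long as they are treated as immutable.
    func cloneForLink(records []string, exports map[string]int) ([]string, map[string]int) {
        recClone := append([]string{}, records...) // fresh backing array
        expClone := make(map[string]int, len(exports))
        for k, v := range exports {
            expClone[k] = v // fresh hash table
        }
        return recClone, expClone
    }

    func main() {
        r, e := cloneForLink([]string{"./a"}, map[string]int{"x": 1})
        r[0], e["x"] = "./b", 2 // the caller's copies are unaffected
        fmt.Println(r, e)
    }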
- - // Clone the import map - namedImports := make(map[js_ast.Ref]js_ast.NamedImport, len(repr.ast.NamedImports)) - for k, v := range repr.ast.NamedImports { - namedImports[k] = v - } - repr.ast.NamedImports = namedImports - - // Clone the export map - resolvedExports := make(map[string]exportData) - for alias, name := range repr.ast.NamedExports { - resolvedExports[alias] = exportData{ - ref: name.Ref, - sourceIndex: sourceIndex, - nameLoc: name.AliasLoc, - } - } - - // Clone the top-level symbol-to-parts map - topLevelSymbolToParts := make(map[js_ast.Ref][]uint32) - for ref, parts := range repr.ast.TopLevelSymbolToParts { - topLevelSymbolToParts[ref] = parts - } - repr.ast.TopLevelSymbolToParts = topLevelSymbolToParts - - // Clone the top-level scope so we can generate more variables - { - new := &js_ast.Scope{} - *new = *repr.ast.ModuleScope - new.Generated = append([]js_ast.Ref{}, new.Generated...) - repr.ast.ModuleScope = new - } - - // Also associate some default metadata with the file - repr.meta = fileMeta{ - cjsStyleExports: repr.ast.HasCommonJSFeatures() || - (options.Mode == config.ModeBundle && repr.ast.ModuleScope.ContainsDirectEval) || - (repr.ast.HasLazyExport && c.options.Mode == config.ModeConvertFormat && !c.options.OutputFormat.KeepES6ImportExportSyntax()), - partMeta: make([]partMeta, len(repr.ast.Parts)), - resolvedExports: resolvedExports, - isProbablyTypeScriptType: make(map[js_ast.Ref]bool), - importsToBind: make(map[js_ast.Ref]importToBind), - } - - case *reprCSS: - // Clone the representation - { - clone := *repr - repr = &clone - file.repr = repr - } - - // Clone the import records - repr.ast.ImportRecords = append([]ast.ImportRecord{}, repr.ast.ImportRecords...) - } - - // All files start off as far as possible from an entry point - file.distanceFromEntryPoint = ^uint32(0) - - // Update the file in our copy of the file array - c.files[sourceIndex] = file - } - - // Create a way to convert source indices to a stable ordering - c.stableSourceIndices = make([]uint32, len(c.files)) - for stableIndex, sourceIndex := range c.reachableFiles { - c.stableSourceIndices[sourceIndex] = uint32(stableIndex) - } - - // Mark all entry points so we don't add them again for import() expressions - for _, sourceIndex := range entryPoints { - file := &c.files[sourceIndex] - file.isEntryPoint = true - - if repr, ok := file.repr.(*reprJS); ok { - // Lazy exports default to CommonJS-style for the transform API - if repr.ast.HasLazyExport && c.options.Mode == config.ModePassThrough { - repr.meta.cjsStyleExports = true + for _, entryPoint := range entryPoints { + if repr, ok := c.graph.Files[entryPoint.SourceIndex].InputFile.Repr.(*graph.JSRepr); ok { + // Loaders default to CommonJS when they are the entry point and the output + // format is not ESM-compatible since that avoids generating the ESM-to-CJS + // machinery. + if repr.AST.HasLazyExport && (c.options.Mode == config.ModePassThrough || + (c.options.Mode == config.ModeConvertFormat && !c.options.OutputFormat.KeepES6ImportExportSyntax())) { + repr.AST.ExportsKind = js_ast.ExportsCommonJS } // Entry points with ES6 exports must generate an exports object when // targeting non-ES6 formats. Note that the IIFE format only needs this // when the global name is present, since that's the only way the exports // can actually be observed externally. 
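The condition in the hunk below compresses the comment above into one expression. Restated as a free-standing predicate (the boolean stands in for the AST checks, and the names are illustrative, not esbuild's):

    package main

    import "fmt"

    type format int

    const (
        formatESM format = iota
        formatCommonJS
        formatIIFE
    )

    // needsExportsObject: an entry point with ES6 exports needs a CommonJS-
    // style exports object when targeting CommonJS, or IIFE with a global
    // name, the only IIFE case where the exports can be observed externally.
    func needsExportsObject(f format, hasES6Exports bool, globalName string) bool {
        if !hasES6Exports {
            return false
        }
        return f == formatCommonJS || (f == formatIIFE && globalName != "")
    }

    func main() {
        fmt.Println(needsExportsObject(formatIIFE, true, ""))    // false
        fmt.Println(needsExportsObject(formatIIFE, true, "lib")) // true
    }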
- if repr.ast.HasES6Exports && (options.OutputFormat == config.FormatCommonJS || + if repr.AST.ExportKeyword.Len > 0 && (options.OutputFormat == config.FormatCommonJS || (options.OutputFormat == config.FormatIIFE && len(options.GlobalName) > 0)) { - repr.ast.UsesExportsRef = true - repr.meta.forceIncludeExportsForEntryPoint = true + repr.AST.UsesExportsRef = true + repr.Meta.ForceIncludeExportsForEntryPoint = true } } } // Allocate a new unbound symbol called "module" in case we need it later if c.options.OutputFormat == config.FormatCommonJS { - runtimeSymbols := &c.symbols.Outer[runtime.SourceIndex] - runtimeScope := c.files[runtime.SourceIndex].repr.(*reprJS).ast.ModuleScope - c.unboundModuleRef = js_ast.Ref{OuterIndex: runtime.SourceIndex, InnerIndex: uint32(len(*runtimeSymbols))} - runtimeScope.Generated = append(runtimeScope.Generated, c.unboundModuleRef) - *runtimeSymbols = append(*runtimeSymbols, js_ast.Symbol{ - Kind: js_ast.SymbolUnbound, - OriginalName: "module", - Link: js_ast.InvalidRef, - }) + c.unboundModuleRef = c.graph.GenerateNewSymbol(runtime.SourceIndex, js_ast.SymbolUnbound, "module") } else { c.unboundModuleRef = js_ast.InvalidRef } @@ -491,88 +228,36 @@ func newLinkerContext( return c } -// Find all files reachable from all entry points. This order should be -// deterministic given that the entry point order is deterministic, since the -// returned order is the postorder of the graph traversal and import record -// order within a given file is deterministic. -func findReachableFiles(files []file, entryPoints []uint32) []uint32 { - visited := make(map[uint32]bool) - var order []uint32 - var visit func(uint32) - - // Include this file and all files it imports - visit = func(sourceIndex uint32) { - if !visited[sourceIndex] { - visited[sourceIndex] = true - file := &files[sourceIndex] - if repr, ok := file.repr.(*reprJS); ok && repr.cssSourceIndex != nil { - visit(*repr.cssSourceIndex) - } - for _, record := range *file.repr.importRecords() { - if record.SourceIndex != nil { - visit(*record.SourceIndex) - } - } - - // Each file must come after its dependencies - order = append(order, sourceIndex) - } +func (c *linkerContext) generateUniqueKeyPrefix() bool { + var data [12]byte + rand.Seed(time.Now().UnixNano()) + if _, err := rand.Read(data[:]); err != nil { + c.log.AddError(nil, logger.Loc{}, fmt.Sprintf("Failed to read from randomness source: %s", err.Error())) + return false } - // The runtime is always included in case it's needed - visit(runtime.SourceIndex) - - // Include all files reachable from any entry point - for _, entryPoint := range entryPoints { - visit(entryPoint) - } - - return order + // This is 16 bytes and shouldn't generate escape characters when put into strings + c.uniqueKeyPrefix = base64.URLEncoding.EncodeToString(data[:]) + c.uniqueKeyPrefixBytes = []byte(c.uniqueKeyPrefix) + return true } -func (c *linkerContext) addRangeError(source logger.Source, r logger.Range, text string) { - c.log.AddRangeError(&source, r, text) - c.hasErrors = true -} - -func (c *linkerContext) addRangeErrorWithNotes(source logger.Source, r logger.Range, text string, notes []logger.MsgData) { - c.log.AddRangeErrorWithNotes(&source, r, text, notes) - c.hasErrors = true -} - -func (c *linkerContext) addPartToFile(sourceIndex uint32, part js_ast.Part, partMeta partMeta) uint32 { - if part.LocalDependencies == nil { - part.LocalDependencies = make(map[uint32]bool) +func (c *linkerContext) link() []graph.OutputFile { + if !c.generateUniqueKeyPrefix() { + return nil } - if 
part.SymbolUses == nil { - part.SymbolUses = make(map[js_ast.Ref]js_ast.SymbolUse) - } - if partMeta.entryBits.entries == nil { - partMeta.entryBits = newBitSet(uint(len(c.entryPoints))) - } - repr := c.files[sourceIndex].repr.(*reprJS) - partIndex := uint32(len(repr.ast.Parts)) - partMeta.prevSibling = partIndex - partMeta.nextSibling = partIndex - repr.ast.Parts = append(repr.ast.Parts, part) - repr.meta.partMeta = append(repr.meta.partMeta, partMeta) - return partIndex -} - -func (c *linkerContext) link() []OutputFile { c.scanImportsAndExports() // Stop now if there were errors - if c.hasErrors { - return []OutputFile{} + if c.log.HasErrors() { + return []graph.OutputFile{} } - c.markPartsReachableFromEntryPoints() - c.handleCrossChunkAssignments() + c.treeShakingAndCodeSplitting() if c.options.Mode == config.ModePassThrough { - for _, entryPoint := range c.entryPoints { - c.preventExportsFromBeingRenamed(entryPoint) + for _, entryPoint := range c.graph.EntryPoints() { + c.preventExportsFromBeingRenamed(entryPoint.SourceIndex) } } @@ -581,144 +266,217 @@ func (c *linkerContext) link() []OutputFile { // Make sure calls to "js_ast.FollowSymbols()" in parallel goroutines after this // won't hit concurrent map mutation hazards - js_ast.FollowAllSymbols(c.symbols) + js_ast.FollowAllSymbols(c.graph.Symbols) return c.generateChunksInParallel(chunks) } -func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []OutputFile { - // Determine the order of files within the chunk ahead of time. This may - // generate additional CSS chunks from JS chunks that import CSS files. - { - originalChunks := chunks - for i, chunk := range originalChunks { - js, jsParts, css := c.chunkFileOrder(&chunk) - - switch chunk.repr.(type) { - case *chunkReprJS: - chunks[i].filesInChunkInOrder = js - chunks[i].partsInChunkInOrder = jsParts - - // If JS files include CSS files, make a sibling chunk for the CSS - if len(css) > 0 { - baseNameOrEmpty := chunk.baseNameOrEmpty - if baseNameOrEmpty != "" { - if js := c.options.OutputExtensionJS; strings.HasSuffix(baseNameOrEmpty, js) { - baseNameOrEmpty = baseNameOrEmpty[:len(baseNameOrEmpty)-len(js)] - } - baseNameOrEmpty += c.options.OutputExtensionCSS - } - chunks = append(chunks, chunkInfo{ - filesInChunkInOrder: css, - entryBits: chunk.entryBits, - isEntryPoint: chunk.isEntryPoint, - sourceIndex: chunk.sourceIndex, - entryPointBit: chunk.entryPointBit, - relDir: chunk.relDir, - baseNameOrEmpty: baseNameOrEmpty, - filesWithPartsInChunk: make(map[uint32]bool), - repr: &chunkReprCSS{}, - }) - } - - case *chunkReprCSS: - chunks[i].filesInChunkInOrder = css - } - } - } - - // We want to process chunks with as much parallelism as possible. However, - // content hashing means chunks that import other chunks must be completed - // after the imported chunks are completed because the import paths contain - // the content hash. It's only safe to process a chunk when the dependency - // count reaches zero. 
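The scheduling code deleted below this comment used one WaitGroup per chunk, decremented by the chunks it imports, so hashing proceeded in dependency order. Distilled to a runnable toy (the three-chunk graph is made up for the demo):

package main

import (
	"fmt"
	"sync"
)

func main() {
	// imports[i] lists the chunks that chunk i imports. Hypothetical
	// graph: chunk 2 imports 0 and 1, chunk 1 imports 0.
	imports := [][]int{{}, {0}, {0, 1}}

	type ordering struct {
		dependencies sync.WaitGroup
		dependents   []int
	}
	order := make([]ordering, len(imports))
	for i, imp := range imports {
		order[i].dependencies.Add(len(imp))
		for _, j := range imp {
			order[j].dependents = append(order[j].dependents, i)
		}
	}

	var done sync.WaitGroup
	done.Add(len(imports))
	for i := range imports {
		go func(i int) {
			defer done.Done()
			order[i].dependencies.Wait() // block until imported chunks finish
			fmt.Println("chunk", i, "processed")
			for _, d := range order[i].dependents {
				order[d].dependencies.Done() // wake chunks that import us
			}
		}(i)
	}
	done.Wait()
}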
- type ordering struct { - dependencies sync.WaitGroup - dependents []uint32 - } - chunkOrdering := make([]ordering, len(chunks)) - for chunkIndex, chunk := range chunks { - chunkOrdering[chunkIndex].dependencies.Add(len(chunk.crossChunkImports)) - for _, otherChunkIndex := range chunk.crossChunkImports { - dependents := &chunkOrdering[otherChunkIndex].dependents - *dependents = append(*dependents, uint32(chunkIndex)) - } - } - - // Check for loops in the dependency graph since they cause a deadlock - var check func(int, []int) - check = func(chunkIndex int, path []int) { +// Currently the automatic chunk generation algorithm should by construction +// never generate chunks that import each other since files are allocated to +// chunks based on which entry points they are reachable from. +// +// This will change in the future when we allow manual chunk labels. But before +// we allow manual chunk labels, we'll need to rework module initialization to +// allow code splitting chunks to be lazily-initialized. +// +// Since that work hasn't been finished yet, cycles in the chunk import graph +// can cause initialization bugs. So let's forbid these cycles for now to guard +// against code splitting bugs that could cause us to generate buggy chunks. +func (c *linkerContext) enforceNoCyclicChunkImports(chunks []chunkInfo) { + var validate func(int, []int) + validate = func(chunkIndex int, path []int) { for _, otherChunkIndex := range path { if chunkIndex == otherChunkIndex { - panic("Internal error: Chunk import graph contains a cycle") + c.log.AddError(nil, logger.Loc{}, "Internal error: generated chunks contain a circular import") + return } } path = append(path, chunkIndex) - for _, otherChunkIndex := range chunks[chunkIndex].crossChunkImports { - check(int(otherChunkIndex), path) + for _, chunkImport := range chunks[chunkIndex].crossChunkImports { + // Ignore cycles caused by dynamic "import()" expressions. These are fine + // because they don't necessarily cause initialization order issues and + // they don't indicate a bug in our chunk generation algorithm. They arise + // normally in real code (e.g. two files that import each other). + if chunkImport.importKind != ast.ImportDynamic { + validate(int(chunkImport.chunkIndex), path) + } } } + path := make([]int, 0, len(chunks)) for i := range chunks { - check(i, nil) + validate(i, path) + } +} + +func (c *linkerContext) generateChunksInParallel(chunks []chunkInfo) []graph.OutputFile { + // Generate each chunk on a separate goroutine + generateWaitGroup := sync.WaitGroup{} + generateWaitGroup.Add(len(chunks)) + for chunkIndex := range chunks { + switch chunks[chunkIndex].chunkRepr.(type) { + case *chunkReprJS: + go c.generateChunkJS(chunks, chunkIndex, &generateWaitGroup) + case *chunkReprCSS: + go c.generateChunkCSS(chunks, chunkIndex, &generateWaitGroup) + } + } + c.enforceNoCyclicChunkImports(chunks) + generateWaitGroup.Wait() + + // Compute the final hashes of each chunk. This can technically be done in + // parallel but it probably doesn't matter so much because we're not hashing + // that much data. 
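The new enforceNoCyclicChunkImports above is the same depth-first, path-carrying walk as the deleted panic-based check, except it reports through the log and skips import() edges. The core cycle test, reduced to a self-contained form:

package main

import "fmt"

// hasCycle walks every chunk's static imports, carrying the current
// path; reaching a chunk already on the path means a cycle.
func hasCycle(imports [][]int) bool {
	found := false
	var validate func(chunkIndex int, path []int)
	validate = func(chunkIndex int, path []int) {
		for _, other := range path {
			if chunkIndex == other {
				found = true
				return
			}
		}
		path = append(path, chunkIndex)
		for _, next := range imports[chunkIndex] {
			validate(next, path)
		}
	}
	path := make([]int, 0, len(imports))
	for i := range imports {
		validate(i, path)
	}
	return found
}

func main() {
	fmt.Println(hasCycle([][]int{{1}, {2}, {}}))  // false
	fmt.Println(hasCycle([][]int{{1}, {2}, {0}})) // true
}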
+ visited := make([]uint32, len(chunks)) + var finalBytes []byte + for chunkIndex := range chunks { + chunk := &chunks[chunkIndex] + var hashSubstitution *string + + // Only wait for the hash if necessary + if config.HasPlaceholder(chunk.finalTemplate, config.HashPlaceholder) { + // Compute the final hash using the isolated hashes of the dependencies + hash := xxhash.New() + appendIsolatedHashesForImportedChunks(hash, chunks, uint32(chunkIndex), visited, ^uint32(chunkIndex)) + finalBytes = hash.Sum(finalBytes[:0]) + finalString := hashForFileName(finalBytes) + hashSubstitution = &finalString + } + + // Render the last remaining placeholder in the template + chunk.finalRelPath = config.TemplateToString(config.SubstituteTemplate(chunk.finalTemplate, config.PathPlaceholders{ + Hash: hashSubstitution, + })) } - results := make([][]OutputFile, len(chunks)) - resultsWaitGroup := sync.WaitGroup{} + // Generate the final output files by joining file pieces together + var resultsWaitGroup sync.WaitGroup + results := make([][]graph.OutputFile, len(chunks)) resultsWaitGroup.Add(len(chunks)) + for chunkIndex, chunk := range chunks { + go func(chunkIndex int, chunk chunkInfo) { + var outputFiles []graph.OutputFile - // Generate each chunk on a separate goroutine - for i := range chunks { - go func(i int) { - chunk := &chunks[i] - order := &chunkOrdering[i] + // Each file may optionally contain additional files to be copied to the + // output directory. This is used by the "file" loader. + for _, sourceIndex := range chunk.filesInChunkInOrder { + outputFiles = append(outputFiles, c.graph.Files[sourceIndex].InputFile.AdditionalFiles...) + } - // Start generating the chunk without dependencies, but stop when - // dependencies are needed. This returns a callback that is called - // later to resume generating the chunk once dependencies are known. 
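The final-hash loop near the top of this hunk folds the isolated hashes of a chunk and everything it transitively imports into one digest, so an edit to any imported chunk changes the importer's file name as well. A sketch of that composition, using stdlib FNV in place of xxhash and a fresh visited slice instead of the patch's reusable one (the chunk layout is hypothetical):

package main

import (
	"encoding/base32"
	"fmt"
	"hash/fnv"
)

type chunk struct {
	isolatedHash []byte // digest of this chunk's own final contents
	imports      []int  // chunks whose paths are embedded in this one
}

// finalHash mixes the isolated hashes of chunkIndex and all chunks
// reachable from it, depth-first, visiting each chunk once.
func finalHash(chunks []chunk, chunkIndex int) string {
	h := fnv.New64a()
	visited := make([]bool, len(chunks))
	var visit func(int)
	visit = func(i int) {
		if visited[i] {
			return
		}
		visited[i] = true
		h.Write(chunks[i].isolatedHash)
		for _, imported := range chunks[i].imports {
			visit(imported)
		}
	}
	visit(chunkIndex)
	// Shorten to something file-name friendly, in the spirit of the
	// patch's hashForFileName helper.
	return base32.StdEncoding.EncodeToString(h.Sum(nil))[:8]
}

func main() {
	chunks := []chunk{
		{isolatedHash: []byte("entry contents"), imports: []int{1}},
		{isolatedHash: []byte("shared contents")},
	}
	fmt.Println(finalHash(chunks, 0)) // changes if either chunk changes
}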
- resume := chunk.repr.generate(c, chunk) + // Path substitution for the chunk itself + finalRelDir := c.fs.Dir(chunk.finalRelPath) + outputContentsJoiner, outputSourceMapShifts := c.substituteFinalPaths(chunks, chunk.outputPieces, func(finalRelPathForImport string) string { + return c.pathBetweenChunks(finalRelDir, finalRelPathForImport) + }) - // Wait for all dependencies to be resolved first - order.dependencies.Wait() + // Generate the optional source map for this chunk + if c.options.SourceMap != config.SourceMapNone && chunk.outputSourceMap.Suffix != nil { + outputSourceMap := chunk.outputSourceMap.Finalize(outputSourceMapShifts) + finalRelPathForSourceMap := chunk.finalRelPath + ".map" - // Fill in the cross-chunk import records now that the paths are known - crossChunkImportRecords := make([]ast.ImportRecord, len(chunk.crossChunkImports)) - crossChunkAbsPaths := make([]string, len(chunk.crossChunkImports)) - for i, otherChunkIndex := range chunk.crossChunkImports { - relPath := chunks[otherChunkIndex].relPath() - crossChunkAbsPaths[i] = c.fs.Join(c.options.AbsOutputDir, relPath) - crossChunkImportRecords[i] = ast.ImportRecord{ - Kind: ast.ImportStmt, - Path: logger.Path{Text: c.pathBetweenChunks(chunk.relDir, relPath)}, + // Potentially write a trailing source map comment + switch c.options.SourceMap { + case config.SourceMapLinkedWithComment: + importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForSourceMap) + importPath = strings.TrimPrefix(importPath, "./") + outputContentsJoiner.EnsureNewlineAtEnd() + outputContentsJoiner.AddString("//# sourceMappingURL=") + outputContentsJoiner.AddString(importPath) + outputContentsJoiner.AddString("\n") + + case config.SourceMapInline, config.SourceMapInlineAndExternal: + outputContentsJoiner.EnsureNewlineAtEnd() + outputContentsJoiner.AddString("//# sourceMappingURL=data:application/json;base64,") + outputContentsJoiner.AddString(base64.StdEncoding.EncodeToString(outputSourceMap)) + outputContentsJoiner.AddString("\n") + } + + // Potentially write the external source map file + switch c.options.SourceMap { + case config.SourceMapLinkedWithComment, config.SourceMapInlineAndExternal, config.SourceMapExternalWithoutComment: + outputFiles = append(outputFiles, graph.OutputFile{ + AbsPath: c.fs.Join(c.options.AbsOutputDir, finalRelPathForSourceMap), + Contents: outputSourceMap, + JSONMetadataChunk: fmt.Sprintf( + "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(outputSourceMap)), + }) } } - // Generate the chunk - results[i] = resume(generateContinue{ - crossChunkAbsPaths: crossChunkAbsPaths, - crossChunkImportRecords: crossChunkImportRecords, + // Finalize the output contents + outputContents := outputContentsJoiner.Done() + + // Path substitution for the JSON metadata + var jsonMetadataChunk string + if c.options.NeedsMetafile { + jsonMetadataChunkPieces := c.breakOutputIntoPieces(chunk.jsonMetadataChunkCallback(len(outputContents)), uint32(len(chunks))) + jsonMetadataChunkBytes, _ := c.substituteFinalPaths(chunks, jsonMetadataChunkPieces, func(finalRelPathForImport string) string { + return c.res.PrettyPath(logger.Path{Text: c.fs.Join(c.options.AbsOutputDir, finalRelPathForImport), Namespace: "file"}) + }) + jsonMetadataChunk = string(jsonMetadataChunkBytes.Done()) + } + + // Generate the output file for this chunk + outputFiles = append(outputFiles, graph.OutputFile{ + AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.finalRelPath), + Contents: outputContents, + JSONMetadataChunk: jsonMetadataChunk, 
+ IsExecutable: chunk.isExecutable, }) - // Wake up any dependents now that we're done - for _, chunkIndex := range order.dependents { - chunkOrdering[chunkIndex].dependencies.Done() - } + results[chunkIndex] = outputFiles resultsWaitGroup.Done() - }(i) + }(chunkIndex, chunk) } - - // Join the results in chunk order for determinism resultsWaitGroup.Wait() - var outputFiles []OutputFile - for _, group := range results { - outputFiles = append(outputFiles, group...) + + // Merge the output files from the different goroutines together in order + outputFilesLen := 0 + for _, result := range results { + outputFilesLen += len(result) + } + outputFiles := make([]graph.OutputFile, 0, outputFilesLen) + for _, result := range results { + outputFiles = append(outputFiles, result...) } return outputFiles } +// Given a set of output pieces (i.e. a buffer already divided into the spans +// between import paths), substitute the final import paths in and then join +// everything into a single byte buffer. +func (c *linkerContext) substituteFinalPaths( + chunks []chunkInfo, + pieces []outputPiece, + modifyPath func(string) string, +) (j helpers.Joiner, shifts []sourcemap.SourceMapShift) { + var shift sourcemap.SourceMapShift + shifts = make([]sourcemap.SourceMapShift, 0, len(pieces)) + shifts = append(shifts, shift) + + for _, piece := range pieces { + var dataOffset sourcemap.LineColumnOffset + j.AddBytes(piece.data) + dataOffset.AdvanceBytes(piece.data) + shift.Before.Add(dataOffset) + shift.After.Add(dataOffset) + + if piece.chunkIndex.IsValid() { + chunk := chunks[piece.chunkIndex.GetIndex()] + importPath := modifyPath(chunk.finalRelPath) + j.AddString(importPath) + shift.Before.AdvanceString(chunk.uniqueKey) + shift.After.AdvanceString(importPath) + shifts = append(shifts, shift) + } + } + + return +} + func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) string { - // Return an absolute path if a public path has been configured + // Join with the public path if it has been configured if c.options.PublicPath != "" { - return c.options.PublicPath + toRelPath + return joinWithPublicPath(c.options.PublicPath, toRelPath) } // Otherwise, return a relative path @@ -741,15 +499,134 @@ func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) s return relPath } +// Returns the path of this file relative to "outbase", which is then ready to +// be joined with the absolute output directory path. The directory and name +// components are returned separately for convenience. +// +// This makes sure to have the directory end in a slash so that it can be +// substituted into a path template without necessarily having a "/" after it. +// Extra slashes should get cleaned up automatically when we join it with the +// output directory. +func (c *linkerContext) pathRelativeToOutbase( + sourceIndex uint32, + entryPointBit uint, + stdExt string, + avoidIndex bool, +) (relDir string, baseName string, baseExt string) { + file := &c.graph.Files[sourceIndex] + relDir = "/" + baseExt = stdExt + + // If the output path was configured explicitly, use it verbatim + if c.options.AbsOutputFile != "" { + baseName = c.fs.Base(c.options.AbsOutputFile) + + // Strip off the extension + ext := c.fs.Ext(baseName) + baseName = baseName[:len(baseName)-len(ext)] + + // Use the extension from the explicit output file path. However, don't do + // that if this is a CSS chunk but the entry point file is not CSS. In that + // case use the standard extension. This happens when importing CSS into JS. 
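substituteFinalPaths above assumes each chunk's output was already split into outputPiece values around unique-key placeholders, so the final paths drop into the gaps without rescanning the bytes. A toy end-to-end version of that idea; the "prefix plus two-digit chunk id" key shape is an assumption for the demo, not the patch's real encoding, and the source-map shift bookkeeping is omitted:

package main

import (
	"fmt"
	"strings"
)

type piece struct {
	data       string
	chunkIndex int // -1 means no import path follows this piece
}

// breakIntoPieces splits output around unique-key placeholders.
func breakIntoPieces(output, uniqueKeyPrefix string, keyToChunk map[string]int) []piece {
	var pieces []piece
	for {
		i := strings.Index(output, uniqueKeyPrefix)
		if i < 0 {
			return append(pieces, piece{data: output, chunkIndex: -1})
		}
		key := output[i : i+len(uniqueKeyPrefix)+2] // prefix + 2-digit id (demo assumption)
		pieces = append(pieces, piece{data: output[:i], chunkIndex: keyToChunk[key]})
		output = output[i+len(key):]
	}
}

// substitute joins the pieces, dropping each chunk's final path into
// the gap its placeholder used to occupy.
func substitute(pieces []piece, finalRelPaths []string) string {
	var b strings.Builder
	for _, p := range pieces {
		b.WriteString(p.data)
		if p.chunkIndex >= 0 {
			b.WriteString(finalRelPaths[p.chunkIndex])
		}
	}
	return b.String()
}

func main() {
	const prefix = "UK_"
	js := `import {a} from "UK_00";console.log(a)`
	pieces := breakIntoPieces(js, prefix, map[string]int{"UK_00": 0})
	fmt.Println(substitute(pieces, []string{"./chunk.2BYVLJFV.js"}))
	// import {a} from "./chunk.2BYVLJFV.js";console.log(a)
}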
+ if _, ok := file.InputFile.Repr.(*graph.CSSRepr); ok || stdExt != c.options.OutputExtensionCSS { + baseExt = ext + } + return + } + + absPath := file.InputFile.Source.KeyPath.Text + isCustomOutputPath := false + + if outPath := c.graph.EntryPoints()[entryPointBit].OutputPath; outPath != "" { + // Use the configured output path if present + absPath = outPath + if !c.fs.IsAbs(absPath) { + absPath = c.fs.Join(c.options.AbsOutputBase, absPath) + } + isCustomOutputPath = true + } else if file.InputFile.Source.KeyPath.Namespace != "file" { + // Come up with a path for virtual paths (i.e. non-file-system paths) + dir, base, _ := logger.PlatformIndependentPathDirBaseExt(absPath) + if avoidIndex && base == "index" { + _, base, _ = logger.PlatformIndependentPathDirBaseExt(dir) + } + baseName = sanitizeFilePathForVirtualModulePath(base) + return + } else { + // Heuristic: If the file is named something like "index.js", then use + // the name of the parent directory instead. This helps avoid the + // situation where many chunks are named "index" because of people + // dynamically-importing npm packages that make use of node's implicit + // "index" file name feature. + if avoidIndex { + base := c.fs.Base(absPath) + base = base[:len(base)-len(c.fs.Ext(base))] + if base == "index" { + absPath = c.fs.Dir(absPath) + } + } + } + + // Try to get a relative path to the base directory + relPath, ok := c.fs.Rel(c.options.AbsOutputBase, absPath) + if !ok { + // This can fail in some situations such as on different drives on + // Windows. In that case we just use the file name. + baseName = c.fs.Base(absPath) + } else { + // Now we finally have a relative path + relDir = c.fs.Dir(relPath) + "/" + baseName = c.fs.Base(relPath) + + // Use platform-independent slashes + relDir = strings.ReplaceAll(relDir, "\\", "/") + + // Replace leading "../" so we don't try to write outside of the output + // directory. This normally can't happen because "AbsOutputBase" is + // automatically computed to contain all entry point files, but it can + // happen if someone sets it manually via the "outbase" API option. + // + // Note that we can't just strip any leading "../" because that could + // cause two separate entry point paths to collide. For example, there + // could be both "src/index.js" and "../src/index.js" as entry points. + dotDotCount := 0 + for strings.HasPrefix(relDir[dotDotCount*3:], "../") { + dotDotCount++ + } + if dotDotCount > 0 { + // The use of "_.._" here is somewhat arbitrary but it is unlikely to + // collide with a folder named by a human and it works on Windows + // (Windows doesn't like names that end with a "."). And not starting + // with a "." means that it will not be hidden on Unix. 
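The hunk resumes just below with exactly this rewrite. As a standalone check of the behavior the comment promises, each leading "../" becomes "_.._/" so a path can't escape the output directory while distinct inputs stay distinct:

package main

import (
	"fmt"
	"strings"
)

// sanitizeRelDir rewrites each leading "../" segment as "_.._/".
func sanitizeRelDir(relDir string) string {
	dotDotCount := 0
	for strings.HasPrefix(relDir[dotDotCount*3:], "../") {
		dotDotCount++
	}
	if dotDotCount > 0 {
		relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:]
	}
	return relDir
}

func main() {
	fmt.Println(sanitizeRelDir("../../src/")) // _.._/_.._/src/
	fmt.Println(sanitizeRelDir("src/pages/")) // src/pages/
}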
+ relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:] + } + relDir = "/" + relDir + } + + // Strip the file extension if the output path is an input file + if !isCustomOutputPath { + ext := c.fs.Ext(baseName) + baseName = baseName[:len(baseName)-len(ext)] + } + return +} + func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { - if len(chunks) < 2 { + jsChunks := 0 + for _, chunk := range chunks { + if _, ok := chunk.chunkRepr.(*chunkReprJS); ok { + jsChunks++ + } + } + if jsChunks < 2 { // No need to compute cross-chunk dependencies if there can't be any return } type chunkMeta struct { - imports map[js_ast.Ref]bool - exports map[js_ast.Ref]bool + imports map[js_ast.Ref]bool + exports map[js_ast.Ref]bool + dynamicImports map[int]bool } chunkMetas := make([]chunkMeta, len(chunks)) @@ -760,27 +637,39 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { waitGroup.Add(len(chunks)) for chunkIndex, chunk := range chunks { go func(chunkIndex int, chunk chunkInfo) { - chunkKey := string(chunk.entryBits.entries) + chunkMeta := &chunkMetas[chunkIndex] imports := make(map[js_ast.Ref]bool) - chunkMetas[chunkIndex] = chunkMeta{imports: imports, exports: make(map[js_ast.Ref]bool)} + chunkMeta.imports = imports + chunkMeta.exports = make(map[js_ast.Ref]bool) // Go over each file in this chunk for sourceIndex := range chunk.filesWithPartsInChunk { // Go over each part in this file that's marked for inclusion in this chunk - switch repr := c.files[sourceIndex].repr.(type) { - case *reprJS: - for partIndex, partMeta := range repr.meta.partMeta { - if string(partMeta.entryBits.entries) != chunkKey { + switch repr := c.graph.Files[sourceIndex].InputFile.Repr.(type) { + case *graph.JSRepr: + for partIndex, partMeta := range repr.AST.Parts { + if !partMeta.IsLive { continue } - part := &repr.ast.Parts[partIndex] + part := &repr.AST.Parts[partIndex] // Rewrite external dynamic imports to point to the chunk for that entry point for _, importRecordIndex := range part.ImportRecordIndices { - record := &repr.ast.ImportRecords[importRecordIndex] - if record.SourceIndex != nil && c.isExternalDynamicImport(record) { - record.Path.Text = c.pathBetweenChunks(chunk.relDir, c.files[*record.SourceIndex].entryPointRelPath) - record.SourceIndex = nil + record := &repr.AST.ImportRecords[importRecordIndex] + if record.SourceIndex.IsValid() && c.isExternalDynamicImport(record, sourceIndex) { + otherChunkIndex := c.graph.Files[record.SourceIndex.GetIndex()].EntryPointChunkIndex + record.Path.Text = chunks[otherChunkIndex].uniqueKey + record.SourceIndex = ast.Index32{} + + // Track this cross-chunk dynamic import so we make sure to + // include its hash when we're calculating the hashes of all + // dependencies of this chunk. + if int(otherChunkIndex) != chunkIndex { + if chunkMeta.dynamicImports == nil { + chunkMeta.dynamicImports = make(map[int]bool) + } + chunkMeta.dynamicImports[int(otherChunkIndex)] = true + } } } @@ -791,7 +680,7 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // is fine. 
for _, declared := range part.DeclaredSymbols { if declared.IsTopLevel { - c.symbols.Get(declared.Ref).ChunkIndex = ^uint32(chunkIndex) + c.graph.Symbols.Get(declared.Ref).ChunkIndex = ast.MakeIndex32(uint32(chunkIndex)) } } @@ -799,7 +688,7 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // with our map of which chunk a given symbol is declared in to // determine if the symbol needs to be imported from another chunk. for ref := range part.SymbolUses { - symbol := c.symbols.Get(ref) + symbol := c.graph.Symbols.Get(ref) // Ignore unbound symbols, which don't have declarations if symbol.Kind == js_ast.SymbolUnbound { @@ -813,10 +702,10 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // If this is imported from another file, follow the import // reference and reference the symbol in that file instead - if importToBind, ok := repr.meta.importsToBind[ref]; ok { - ref = importToBind.ref - symbol = c.symbols.Get(ref) - } else if repr.meta.cjsWrap && ref != repr.ast.WrapperRef { + if importData, ok := repr.Meta.ImportsToBind[ref]; ok { + ref = importData.Ref + symbol = c.graph.Symbols.Get(ref) + } else if repr.Meta.Wrap == graph.WrapCJS && ref != repr.AST.WrapperRef { // The only internal symbol that wrapped CommonJS files export // is the wrapper itself. continue @@ -840,6 +729,36 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { } } } + + // Include the exports if this is an entry point chunk + if chunk.isEntryPoint { + if repr, ok := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr); ok { + if repr.Meta.Wrap != graph.WrapCJS { + for _, alias := range repr.Meta.SortedAndFilteredExportAliases { + export := repr.Meta.ResolvedExports[alias] + targetRef := export.Ref + + // If this is an import, then target what the import points to + if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[targetRef]; ok { + targetRef = importData.Ref + } + + imports[targetRef] = true + } + } + + // Ensure "exports" is included if the current output format needs it + if repr.Meta.ForceIncludeExportsForEntryPoint { + imports[repr.AST.ExportsRef] = true + } + + // Include the wrapper if present + if repr.Meta.Wrap != graph.WrapNone { + imports[repr.AST.WrapperRef] = true + } + } + } + waitGroup.Done() }(chunkIndex, chunk) } @@ -848,20 +767,22 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // Mark imported symbols as exported in the chunk from which they are declared for chunkIndex := range chunks { chunk := &chunks[chunkIndex] - repr, ok := chunk.repr.(*chunkReprJS) + chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS) if !ok { continue } + chunkMeta := chunkMetas[chunkIndex] // Find all uses in this chunk of symbols from other chunks - repr.importsFromOtherChunks = make(map[uint32]crossChunkImportItemArray) - for importRef := range chunkMetas[chunkIndex].imports { + chunkRepr.importsFromOtherChunks = make(map[uint32]crossChunkImportItemArray) + for importRef := range chunkMeta.imports { // Ignore uses that aren't top-level symbols - otherChunkIndex := ^c.symbols.Get(importRef).ChunkIndex - if otherChunkIndex != ^uint32(0) && otherChunkIndex != uint32(chunkIndex) { - repr.importsFromOtherChunks[otherChunkIndex] = - append(repr.importsFromOtherChunks[otherChunkIndex], crossChunkImportItem{ref: importRef}) - chunkMetas[otherChunkIndex].exports[importRef] = true + if otherChunkIndex := c.graph.Symbols.Get(importRef).ChunkIndex; 
otherChunkIndex.IsValid() { + if otherChunkIndex := otherChunkIndex.GetIndex(); otherChunkIndex != uint32(chunkIndex) { + chunkRepr.importsFromOtherChunks[otherChunkIndex] = + append(chunkRepr.importsFromOtherChunks[otherChunkIndex], crossChunkImportItem{ref: importRef}) + chunkMetas[otherChunkIndex].exports[importRef] = true + } } } @@ -870,12 +791,29 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // these chunks are evaluated for their side effects too. if chunk.isEntryPoint { for otherChunkIndex, otherChunk := range chunks { - if chunkIndex != otherChunkIndex && otherChunk.entryBits.hasBit(chunk.entryPointBit) { - imports := repr.importsFromOtherChunks[uint32(otherChunkIndex)] - repr.importsFromOtherChunks[uint32(otherChunkIndex)] = imports + if _, ok := otherChunk.chunkRepr.(*chunkReprJS); ok && chunkIndex != otherChunkIndex && otherChunk.entryBits.HasBit(chunk.entryPointBit) { + imports := chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)] + chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)] = imports } } } + + // Make sure we also track dynamic cross-chunk imports. These need to be + // tracked so we count them as dependencies of this chunk for the purpose + // of hash calculation. + if chunkMeta.dynamicImports != nil { + sortedDynamicImports := make([]int, 0, len(chunkMeta.dynamicImports)) + for chunkIndex := range chunkMeta.dynamicImports { + sortedDynamicImports = append(sortedDynamicImports, chunkIndex) + } + sort.Ints(sortedDynamicImports) + for _, chunkIndex := range sortedDynamicImports { + chunk.crossChunkImports = append(chunk.crossChunkImports, chunkImport{ + importKind: ast.ImportDynamic, + chunkIndex: uint32(chunkIndex), + }) + } + } } // Generate cross-chunk exports. These must be computed before cross-chunk @@ -883,12 +821,12 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // aliases simultaneously to avoid collisions. for chunkIndex := range chunks { chunk := &chunks[chunkIndex] - repr, ok := chunk.repr.(*chunkReprJS) + chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS) if !ok { continue } - repr.exportsToOtherChunks = make(map[js_ast.Ref]string) + chunkRepr.exportsToOtherChunks = make(map[js_ast.Ref]string) switch c.options.OutputFormat { case config.FormatESModule: r := renamer.ExportRenamer{} @@ -898,13 +836,13 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { if c.options.MinifyIdentifiers { alias = r.NextMinifiedName() } else { - alias = r.NextRenamedName(c.symbols.Get(export.ref).OriginalName) + alias = r.NextRenamedName(c.graph.Symbols.Get(export.Ref).OriginalName) } - items = append(items, js_ast.ClauseItem{Name: js_ast.LocRef{Ref: export.ref}, Alias: alias}) - repr.exportsToOtherChunks[export.ref] = alias + items = append(items, js_ast.ClauseItem{Name: js_ast.LocRef{Ref: export.Ref}, Alias: alias}) + chunkRepr.exportsToOtherChunks[export.Ref] = alias } if len(items) > 0 { - repr.crossChunkSuffixStmts = []js_ast.Stmt{{Data: &js_ast.SExportClause{ + chunkRepr.crossChunkSuffixStmts = []js_ast.Stmt{{Data: &js_ast.SExportClause{ Items: items, }}} } @@ -919,23 +857,25 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { // be embedded in the generated import statements. 
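The two passes above amount to: record which chunk declares each top-level symbol, then any use of that symbol from a different chunk becomes an import in the using chunk and an export in the declaring chunk. A data-model sketch, with plain ints standing in for refs and chunk indices (the values are made up for the demo):

package main

import "fmt"

func main() {
	// declaredIn[ref] = chunk that declares the symbol
	declaredIn := map[int]int{10: 0, 11: 0, 12: 1}
	// usesByChunk[chunk] = refs used by parts assigned to that chunk
	usesByChunk := [][]int{{10, 11}, {10, 12}}

	importsFromOtherChunks := make([]map[int][]int, len(usesByChunk))
	exports := make([]map[int]bool, len(usesByChunk))
	for i := range exports {
		importsFromOtherChunks[i] = map[int][]int{}
		exports[i] = map[int]bool{}
	}

	for chunk, uses := range usesByChunk {
		for _, ref := range uses {
			if owner, ok := declaredIn[ref]; ok && owner != chunk {
				// The using chunk imports; the declaring chunk exports.
				importsFromOtherChunks[chunk][owner] =
					append(importsFromOtherChunks[chunk][owner], ref)
				exports[owner][ref] = true
			}
		}
	}

	fmt.Println(importsFromOtherChunks[1]) // map[0:[10]]
	fmt.Println(exports[0])                // map[10:true]
}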
for chunkIndex := range chunks { chunk := &chunks[chunkIndex] - repr, ok := chunk.repr.(*chunkReprJS) + chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS) if !ok { continue } - var crossChunkImports []uint32 var crossChunkPrefixStmts []js_ast.Stmt - for _, crossChunkImport := range c.sortedCrossChunkImports(chunks, repr.importsFromOtherChunks) { + for _, crossChunkImport := range c.sortedCrossChunkImports(chunks, chunkRepr.importsFromOtherChunks) { switch c.options.OutputFormat { case config.FormatESModule: var items []js_ast.ClauseItem for _, item := range crossChunkImport.sortedImportItems { items = append(items, js_ast.ClauseItem{Name: js_ast.LocRef{Ref: item.ref}, Alias: item.exportAlias}) } - importRecordIndex := uint32(len(crossChunkImports)) - crossChunkImports = append(crossChunkImports, crossChunkImport.chunkIndex) + importRecordIndex := uint32(len(chunk.crossChunkImports)) + chunk.crossChunkImports = append(chunk.crossChunkImports, chunkImport{ + importKind: ast.ImportStmt, + chunkIndex: crossChunkImport.chunkIndex, + }) if len(items) > 0 { // "import {a, b} from './chunk.js'" crossChunkPrefixStmts = append(crossChunkPrefixStmts, js_ast.Stmt{Data: &js_ast.SImport{ @@ -954,14 +894,12 @@ func (c *linkerContext) computeCrossChunkDependencies(chunks []chunkInfo) { } } - chunk.crossChunkImports = crossChunkImports - repr.crossChunkPrefixStmts = crossChunkPrefixStmts + chunkRepr.crossChunkPrefixStmts = crossChunkPrefixStmts } } type crossChunkImport struct { chunkIndex uint32 - sortingKey string sortedImportItems crossChunkImportItemArray } @@ -972,7 +910,7 @@ func (a crossChunkImportArray) Len() int { return len(a) } func (a crossChunkImportArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } func (a crossChunkImportArray) Less(i int, j int) bool { - return a[i].sortingKey < a[j].sortingKey + return a[i].chunkIndex < a[j].chunkIndex } // Sort cross-chunk imports by chunk name for determinism @@ -981,14 +919,14 @@ func (c *linkerContext) sortedCrossChunkImports(chunks []chunkInfo, importsFromO for otherChunkIndex, importItems := range importsFromOtherChunks { // Sort imports from a single chunk by alias for determinism - exportsToOtherChunks := chunks[otherChunkIndex].repr.(*chunkReprJS).exportsToOtherChunks + otherChunk := &chunks[otherChunkIndex] + exportsToOtherChunks := otherChunk.chunkRepr.(*chunkReprJS).exportsToOtherChunks for i, item := range importItems { importItems[i].exportAlias = exportsToOtherChunks[item.ref] } sort.Sort(importItems) result = append(result, crossChunkImport{ chunkIndex: otherChunkIndex, - sortingKey: string(chunks[otherChunkIndex].entryBits.entries), sortedImportItems: importItems, }) } @@ -1012,39 +950,14 @@ func (a crossChunkImportItemArray) Less(i int, j int) bool { return a[i].exportAlias < a[j].exportAlias } -type crossChunkExportItem struct { - ref js_ast.Ref - keyPath logger.Path -} - -// This type is just so we can use Go's native sort function -type crossChunkExportItemArray []crossChunkExportItem - -func (a crossChunkExportItemArray) Len() int { return len(a) } -func (a crossChunkExportItemArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } - -func (a crossChunkExportItemArray) Less(i int, j int) bool { - ai := a[i] - aj := a[j] - - // The sort order here is arbitrary but needs to be consistent between builds. - // The InnerIndex should be stable because the parser for a single file is - // single-threaded and deterministically assigns out InnerIndex values - // sequentially. But the OuterIndex (i.e. 
source index) should be unstable - // because the main thread assigns out source index values sequentially to - // newly-discovered dependencies in a multi-threaded producer/consumer - // relationship. So instead we use the key path from the source at OuterIndex - // for stability. This compares using the InnerIndex first before the key path - // because it's a less expensive comparison test. - return ai.ref.InnerIndex < aj.ref.InnerIndex || - (ai.ref.InnerIndex == aj.ref.InnerIndex && ai.keyPath.ComesBeforeInSortedOrder(aj.keyPath)) -} - // Sort cross-chunk exports by chunk name for determinism -func (c *linkerContext) sortedCrossChunkExportItems(exportRefs map[js_ast.Ref]bool) crossChunkExportItemArray { - result := make(crossChunkExportItemArray, 0, len(exportRefs)) +func (c *linkerContext) sortedCrossChunkExportItems(exportRefs map[js_ast.Ref]bool) renamer.StableRefArray { + result := make(renamer.StableRefArray, 0, len(exportRefs)) for ref := range exportRefs { - result = append(result, crossChunkExportItem{ref: ref, keyPath: c.files[ref.OuterIndex].source.KeyPath}) + result = append(result, renamer.StableRef{ + StableSourceIndex: c.graph.StableSourceIndices[ref.SourceIndex], + Ref: ref, + }) } sort.Sort(result) return result @@ -1052,39 +965,36 @@ func (c *linkerContext) sortedCrossChunkExportItems(exportRefs map[js_ast.Ref]bo func (c *linkerContext) scanImportsAndExports() { // Step 1: Figure out what modules must be CommonJS - for _, sourceIndex := range c.reachableFiles { - file := &c.files[sourceIndex] - switch repr := file.repr.(type) { - case *reprCSS: - // We shouldn't need to clone this because it should be empty for CSS files - if file.additionalFiles != nil { - panic("Internal error") - } - + for _, sourceIndex := range c.graph.ReachableFiles { + file := &c.graph.Files[sourceIndex] + switch repr := file.InputFile.Repr.(type) { + case *graph.CSSRepr: // Inline URLs for non-CSS files into the CSS file - for importRecordIndex := range repr.ast.ImportRecords { - if record := &repr.ast.ImportRecords[importRecordIndex]; record.SourceIndex != nil { - otherFile := &c.files[*record.SourceIndex] - if otherRepr, ok := otherFile.repr.(*reprJS); ok { - record.Path.Text = otherRepr.ast.URLForCSS + var additionalFiles []graph.OutputFile + for importRecordIndex := range repr.AST.ImportRecords { + if record := &repr.AST.ImportRecords[importRecordIndex]; record.SourceIndex.IsValid() { + otherFile := &c.graph.Files[record.SourceIndex.GetIndex()] + if otherRepr, ok := otherFile.InputFile.Repr.(*graph.JSRepr); ok { + record.Path.Text = otherRepr.AST.URLForCSS record.Path.Namespace = "" - record.SourceIndex = nil + record.SourceIndex = ast.Index32{} // Copy the additional files to the output directory - file.additionalFiles = append(file.additionalFiles, otherFile.additionalFiles...) + additionalFiles = append(additionalFiles, otherFile.InputFile.AdditionalFiles...) 
} } } + file.InputFile.AdditionalFiles = additionalFiles - case *reprJS: - for importRecordIndex := range repr.ast.ImportRecords { - record := &repr.ast.ImportRecords[importRecordIndex] - if record.SourceIndex == nil { + case *graph.JSRepr: + for importRecordIndex := range repr.AST.ImportRecords { + record := &repr.AST.ImportRecords[importRecordIndex] + if !record.SourceIndex.IsValid() { continue } - otherFile := &c.files[*record.SourceIndex] - otherRepr := otherFile.repr.(*reprJS) + otherFile := &c.graph.Files[record.SourceIndex.GetIndex()] + otherRepr := otherFile.InputFile.Repr.(*graph.JSRepr) switch record.Kind { case ast.ImportStmt: @@ -1099,74 +1009,71 @@ func (c *linkerContext) scanImportsAndExports() { // We emit a warning in this case but try to avoid turning the module // into a CommonJS module if possible. This is possible with named // imports (the module stays an ECMAScript module but the imports are - // rewritten with undefined) but is not possible with star imports: + // rewritten with undefined) but is not possible with star or default + // imports: // // import * as ns from './empty-file' - // console.log(ns) + // import defVal from './empty-file' + // console.log(ns, defVal) // // In that case the module *is* considered a CommonJS module because // the namespace object must be created. - if record.ContainsImportStar && !otherRepr.ast.HasES6ImportsOrExports() && !otherRepr.ast.HasLazyExport { - otherRepr.meta.cjsStyleExports = true + if (record.ContainsImportStar || record.ContainsDefaultAlias) && otherRepr.AST.ExportsKind == js_ast.ExportsNone && !otherRepr.AST.HasLazyExport { + otherRepr.Meta.Wrap = graph.WrapCJS + otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS } case ast.ImportRequire: // Files that are imported with require() must be CommonJS modules - otherRepr.meta.cjsStyleExports = true + if otherRepr.AST.ExportsKind == js_ast.ExportsESM { + otherRepr.Meta.Wrap = graph.WrapESM + } else { + otherRepr.Meta.Wrap = graph.WrapCJS + otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS + } case ast.ImportDynamic: - if c.options.CodeSplitting { - // Files that are imported with import() must be entry points - if !otherFile.isEntryPoint { - c.entryPoints = append(c.entryPoints, *record.SourceIndex) - otherFile.isEntryPoint = true - } - } else { + if !c.options.CodeSplitting { // If we're not splitting, then import() is just a require() that // returns a promise, so the imported file must be a CommonJS module - otherRepr.meta.cjsStyleExports = true + if otherRepr.AST.ExportsKind == js_ast.ExportsESM { + otherRepr.Meta.Wrap = graph.WrapESM + } else { + otherRepr.Meta.Wrap = graph.WrapCJS + otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS + } } } } + + // If the output format doesn't have an implicit CommonJS wrapper, any file + // that uses CommonJS features will need to be wrapped, even though the + // resulting wrapper won't be invoked by other files. An exception is made + // for entry point files in CommonJS format (or when in pass-through mode). + if repr.AST.ExportsKind == js_ast.ExportsCommonJS && (!file.IsEntryPoint() || + c.options.OutputFormat == config.FormatIIFE || c.options.OutputFormat == config.FormatESModule) { + repr.Meta.Wrap = graph.WrapCJS + } } } - // Step 2: Propagate CommonJS status for export star statements that are re- - // exports from a CommonJS module. Exports from a CommonJS module are not - // statically analyzable, so the export star must be evaluated at run time - // instead of at bundle time. 
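Step 1 above boils down to a per-import-record decision table. Restated as a function over simplified enums; the names mirror the patch's graph.Wrap* and js_ast.Exports* values, but the types here are reductions, and the HasLazyExport carve-out on the star/default rule is omitted for brevity:

package main

import "fmt"

type ExportsKind int

const (
	ExportsNone ExportsKind = iota
	ExportsESM
	ExportsCommonJS
)

type WrapKind int

const (
	WrapNone WrapKind = iota
	WrapCJS
	WrapESM
)

type ImportKind int

const (
	ImportStmt ImportKind = iota
	ImportRequire
	ImportDynamic
)

type file struct {
	exports ExportsKind
	wrap    WrapKind
}

// applyImport mutates the imported file the way Step 1 does: require()
// and non-splitting import() force CommonJS semantics (ESM targets get
// an ESM wrapper instead of being demoted), and star/default imports of
// an export-less file turn it into CommonJS so a namespace object exists.
func applyImport(kind ImportKind, starOrDefault bool, codeSplitting bool, f *file) {
	switch kind {
	case ImportStmt:
		if starOrDefault && f.exports == ExportsNone {
			f.wrap = WrapCJS
			f.exports = ExportsCommonJS
		}
	case ImportRequire:
		if f.exports == ExportsESM {
			f.wrap = WrapESM
		} else {
			f.wrap = WrapCJS
			f.exports = ExportsCommonJS
		}
	case ImportDynamic:
		if !codeSplitting {
			if f.exports == ExportsESM {
				f.wrap = WrapESM
			} else {
				f.wrap = WrapCJS
				f.exports = ExportsCommonJS
			}
		}
	}
}

func main() {
	f := file{exports: ExportsESM}
	applyImport(ImportRequire, false, false, &f)
	fmt.Println(f.wrap == WrapESM) // true: require() of ESM gets an ESM wrapper
}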
- for _, sourceIndex := range c.reachableFiles { - if repr, ok := c.files[sourceIndex].repr.(*reprJS); ok && len(repr.ast.ExportStarImportRecords) > 0 { - visited := make(map[uint32]bool) - c.isCommonJSDueToExportStar(sourceIndex, visited) - } - } - - // Step 3: Resolve "export * from" statements. This must be done after we - // discover all modules that can be CommonJS because export stars are ignored - // for CommonJS modules. - exportStarStack := make([]uint32, 0, 32) - for _, sourceIndex := range c.reachableFiles { - file := &c.files[sourceIndex] - repr, ok := file.repr.(*reprJS) + // Step 2: Propagate dynamic export status for export star statements that + // are re-exports from a module whose exports are not statically analyzable. + // In this case the export star must be evaluated at run time instead of at + // bundle time. + for _, sourceIndex := range c.graph.ReachableFiles { + repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) if !ok { continue } - // Expression-style loaders defer code generation until linking. Code - // generation is done here because at this point we know that the - // "cjsStyleExports" flag has its final value and will not be changed. - if repr.ast.HasLazyExport { - c.generateCodeForLazyExport(sourceIndex) + if repr.Meta.Wrap != graph.WrapNone { + c.recursivelyWrapDependencies(sourceIndex) } - // If the output format doesn't have an implicit CommonJS wrapper, any file - // that uses CommonJS features will need to be wrapped, even though the - // resulting wrapper won't be invoked by other files. - if repr.meta.cjsStyleExports && - (c.options.OutputFormat == config.FormatIIFE || - c.options.OutputFormat == config.FormatESModule) { - repr.meta.cjsWrap = true + if len(repr.AST.ExportStarImportRecords) > 0 { + visited := make(map[uint32]bool) + c.hasDynamicExportsDueToExportStar(sourceIndex, visited) } // Even if the output file is CommonJS-like, we may still need to wrap @@ -1175,46 +1082,63 @@ func (c *linkerContext) scanImportsAndExports() { // method, whatever it is, will need to invoke the wrapper. Note that // this can include entry points (e.g. an entry point that imports a file // that imports that entry point). - for _, record := range repr.ast.ImportRecords { - if record.SourceIndex != nil { - otherRepr := c.files[*record.SourceIndex].repr.(*reprJS) - if otherRepr.meta.cjsStyleExports { - otherRepr.meta.cjsWrap = true + for _, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() { + otherRepr := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr) + if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS { + c.recursivelyWrapDependencies(record.SourceIndex.GetIndex()) } } } - - // Propagate exports for export star statements - if len(repr.ast.ExportStarImportRecords) > 0 { - c.addExportsForExportStar(repr.meta.resolvedExports, sourceIndex, exportStarStack) - } - - // Add an empty part for the namespace export that we can fill in later - repr.meta.nsExportPartIndex = c.addPartToFile(sourceIndex, js_ast.Part{ - CanBeRemovedIfUnused: true, - IsNamespaceExport: true, - }, partMeta{}) - - // Also add a special export called "*" so import stars can bind to it. - // This must be done in this step because it must come after CommonJS - // module discovery but before matching imports with exports. 
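Step 2 above calls c.recursivelyWrapDependencies, whose definition lies outside this hunk. A plausible minimal reading under the hunk's data model (this is a guess at the shape, not the vendored implementation): once a file needs a wrapper, every file it statically imports needs one too, so evaluation can be deferred until the wrapper is invoked; a visited flag keeps import cycles from recursing forever.

package main

import "fmt"

type WrapKind int

const (
	WrapNone WrapKind = iota
	WrapCJS
	WrapESM
)

type file struct {
	isESM   bool
	wrap    WrapKind
	imports []int
	visited bool
}

// wrapDependencies marks index and everything reachable from it as
// wrapped: CommonJS-style files get WrapCJS, ESM files get WrapESM.
func wrapDependencies(files []*file, index int) {
	f := files[index]
	if f.visited {
		return
	}
	f.visited = true
	if f.wrap == WrapNone {
		if f.isESM {
			f.wrap = WrapESM
		} else {
			f.wrap = WrapCJS
		}
	}
	for _, dep := range f.imports {
		wrapDependencies(files, dep)
	}
}

func main() {
	files := []*file{
		{wrap: WrapCJS, imports: []int{1}}, // required entry, already wrapped
		{isESM: true, imports: []int{0}},   // cycle back to the entry
	}
	wrapDependencies(files, 0)
	fmt.Println(files[1].wrap == WrapESM) // true, and the cycle terminates
}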
- repr.meta.resolvedExports["*"] = exportData{ - ref: repr.ast.ExportsRef, - sourceIndex: sourceIndex, - } - repr.ast.TopLevelSymbolToParts[repr.ast.ExportsRef] = []uint32{repr.meta.nsExportPartIndex} } - // Step 4: Match imports with exports. This must be done after we process all - // export stars because imports can bind to export star re-exports. - for _, sourceIndex := range c.reachableFiles { - file := &c.files[sourceIndex] - repr, ok := file.repr.(*reprJS) + // Step 3: Resolve "export * from" statements. This must be done after we + // discover all modules that can have dynamic exports because export stars + // are ignored for those modules. + exportStarStack := make([]uint32, 0, 32) + for _, sourceIndex := range c.graph.ReachableFiles { + repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) if !ok { continue } - if len(repr.ast.NamedImports) > 0 { + // Expression-style loaders defer code generation until linking. Code + // generation is done here because at this point we know that the + // "ExportsKind" field has its final value and will not be changed. + if repr.AST.HasLazyExport { + c.generateCodeForLazyExport(sourceIndex) + } + + // Propagate exports for export star statements + if len(repr.AST.ExportStarImportRecords) > 0 { + c.addExportsForExportStar(repr.Meta.ResolvedExports, sourceIndex, exportStarStack) + } + + // Add an empty part for the namespace export that we can fill in later + repr.Meta.NSExportPartIndex = c.graph.AddPartToFile(sourceIndex, js_ast.Part{ + CanBeRemovedIfUnused: true, + }) + + // Also add a special export so import stars can bind to it. This must be + // done in this step because it must come after CommonJS module discovery + // but before matching imports with exports. + repr.Meta.ResolvedExportStar = &graph.ExportData{ + Ref: repr.AST.ExportsRef, + SourceIndex: sourceIndex, + } + repr.AST.TopLevelSymbolToParts[repr.AST.ExportsRef] = []uint32{repr.Meta.NSExportPartIndex} + } + + // Step 4: Match imports with exports. This must be done after we process all + // export stars because imports can bind to export star re-exports. + for _, sourceIndex := range c.graph.ReachableFiles { + file := &c.graph.Files[sourceIndex] + repr, ok := file.InputFile.Repr.(*graph.JSRepr) + if !ok { + continue + } + + if len(repr.AST.NamedImports) > 0 { c.matchImportsWithExportsForFile(uint32(sourceIndex)) } @@ -1223,51 +1147,48 @@ func (c *linkerContext) scanImportsAndExports() { // symbols. In that case make sure to mark them as such so they don't // get minified. if (c.options.OutputFormat == config.FormatPreserve || c.options.OutputFormat == config.FormatCommonJS) && - !repr.meta.cjsWrap && file.isEntryPoint { - exportsRef := js_ast.FollowSymbols(c.symbols, repr.ast.ExportsRef) - moduleRef := js_ast.FollowSymbols(c.symbols, repr.ast.ModuleRef) - c.symbols.Get(exportsRef).Kind = js_ast.SymbolUnbound - c.symbols.Get(moduleRef).Kind = js_ast.SymbolUnbound + repr.Meta.Wrap == graph.WrapNone && file.IsEntryPoint() { + exportsRef := js_ast.FollowSymbols(c.graph.Symbols, repr.AST.ExportsRef) + moduleRef := js_ast.FollowSymbols(c.graph.Symbols, repr.AST.ModuleRef) + c.graph.Symbols.Get(exportsRef).Kind = js_ast.SymbolUnbound + c.graph.Symbols.Get(moduleRef).Kind = js_ast.SymbolUnbound } + + // Create the wrapper part for wrapped files. This is needed by a later step. + c.createWrapperForFile(uint32(sourceIndex)) } // Step 5: Create namespace exports for every file. 
This is always necessary // for CommonJS files, and is also necessary for other files if they are // imported using an import star statement. waitGroup := sync.WaitGroup{} - for _, sourceIndex := range c.reachableFiles { - repr, ok := c.files[sourceIndex].repr.(*reprJS) + for _, sourceIndex := range c.graph.ReachableFiles { + repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) if !ok { continue } // This is the slowest step and is also parallelizable, so do this in parallel. waitGroup.Add(1) - go func(sourceIndex uint32, repr *reprJS) { + go func(sourceIndex uint32, repr *graph.JSRepr) { // Now that all exports have been resolved, sort and filter them to create // something we can iterate over later. - aliases := make([]string, 0, len(repr.meta.resolvedExports)) + aliases := make([]string, 0, len(repr.Meta.ResolvedExports)) nextAlias: - for alias, export := range repr.meta.resolvedExports { - // The automatically-generated namespace export is just for internal binding - // purposes and isn't meant to end up in generated code. - if alias == "*" { - continue - } - + for alias, export := range repr.Meta.ResolvedExports { // Re-exporting multiple symbols with the same name causes an ambiguous // export. These names cannot be used and should not end up in generated code. - otherRepr := c.files[export.sourceIndex].repr.(*reprJS) - if len(export.potentiallyAmbiguousExportStarRefs) > 0 { - mainRef := export.ref - if imported, ok := otherRepr.meta.importsToBind[export.ref]; ok { - mainRef = imported.ref + otherRepr := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr) + if len(export.PotentiallyAmbiguousExportStarRefs) > 0 { + mainRef := export.Ref + if imported, ok := otherRepr.Meta.ImportsToBind[export.Ref]; ok { + mainRef = imported.Ref } - for _, ambiguousExport := range export.potentiallyAmbiguousExportStarRefs { - ambiguousRepr := c.files[ambiguousExport.sourceIndex].repr.(*reprJS) - ambiguousRef := ambiguousExport.ref - if imported, ok := ambiguousRepr.meta.importsToBind[ambiguousExport.ref]; ok { - ambiguousRef = imported.ref + for _, ambiguousExport := range export.PotentiallyAmbiguousExportStarRefs { + ambiguousRepr := c.graph.Files[ambiguousExport.SourceIndex].InputFile.Repr.(*graph.JSRepr) + ambiguousRef := ambiguousExport.Ref + if imported, ok := ambiguousRepr.Meta.ImportsToBind[ambiguousExport.Ref]; ok { + ambiguousRef = imported.Ref } if mainRef != ambiguousRef { continue nextAlias @@ -1278,90 +1199,253 @@ func (c *linkerContext) scanImportsAndExports() { // Ignore re-exported imports in TypeScript files that failed to be // resolved. These are probably just type-only imports so the best thing to // do is to silently omit them from the export list. - if otherRepr.meta.isProbablyTypeScriptType[export.ref] { + if otherRepr.Meta.IsProbablyTypeScriptType[export.Ref] { continue } aliases = append(aliases, alias) } sort.Strings(aliases) - repr.meta.sortedAndFilteredExportAliases = aliases + repr.Meta.SortedAndFilteredExportAliases = aliases // Export creation uses "sortedAndFilteredExportAliases" so this must // come second after we fill in that array c.createExportsForFile(uint32(sourceIndex)) + waitGroup.Done() }(sourceIndex, repr) } waitGroup.Wait() // Step 6: Bind imports to exports. This adds non-local dependencies on the - // parts that declare the export to all parts that use the import. 
- for _, sourceIndex := range c.reachableFiles { - file := &c.files[sourceIndex] - repr, ok := file.repr.(*reprJS) + // parts that declare the export to all parts that use the import. Also + // generate wrapper parts for wrapped files. + for _, sourceIndex := range c.graph.ReachableFiles { + file := &c.graph.Files[sourceIndex] + repr, ok := file.InputFile.Repr.(*graph.JSRepr) if !ok { continue } + // Pre-generate symbols for re-exports CommonJS symbols in case they + // are necessary later. This is done now because the symbols map cannot be + // mutated later due to parallelism. + if file.IsEntryPoint() && c.options.OutputFormat == config.FormatESModule { + copies := make([]js_ast.Ref, len(repr.Meta.SortedAndFilteredExportAliases)) + for i, alias := range repr.Meta.SortedAndFilteredExportAliases { + copies[i] = c.graph.GenerateNewSymbol(sourceIndex, js_ast.SymbolOther, "export_"+alias) + } + repr.Meta.CJSExportCopies = copies + } + + // Use "init_*" for ESM wrappers instead of "require_*" + if repr.Meta.Wrap == graph.WrapESM { + c.graph.Symbols.Get(repr.AST.WrapperRef).OriginalName = "init_" + file.InputFile.Source.IdentifierName + } + // If this isn't CommonJS, then rename the unused "exports" and "module" // variables to avoid them causing the identically-named variables in // actual CommonJS files from being renamed. This is purely about // aesthetics and is not about correctness. This is done here because by // this point, we know the CommonJS status will not change further. - if !repr.meta.cjsWrap && !repr.meta.cjsStyleExports && (!file.isEntryPoint || + if repr.Meta.Wrap != graph.WrapCJS && repr.AST.ExportsKind != js_ast.ExportsCommonJS && (!file.IsEntryPoint() || c.options.OutputFormat != config.FormatCommonJS) { - name := file.source.IdentifierName - c.symbols.Get(repr.ast.ExportsRef).OriginalName = name + "_exports" - c.symbols.Get(repr.ast.ModuleRef).OriginalName = name + "_module" + name := file.InputFile.Source.IdentifierName + c.graph.Symbols.Get(repr.AST.ExportsRef).OriginalName = name + "_exports" + c.graph.Symbols.Get(repr.AST.ModuleRef).OriginalName = name + "_module" } // Include the "__export" symbol from the runtime if it was used in the // previous step. The previous step can't do this because it's running in // parallel and can't safely mutate the "importsToBind" map of another file. 
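Just below, Step 6 merges each import ref into its resolved export via js_ast.MergeSymbols, and link() earlier ran js_ast.FollowAllSymbols before any parallel phase so no goroutine mutates the links concurrently. A toy model of that link-and-follow mechanism (a union-find-style alias chain; the Symbol type here is a reduction of the real one):

package main

import "fmt"

type Ref int

const InvalidRef Ref = -1

type Symbol struct {
	Name string
	Link Ref // InvalidRef unless merged into another symbol
}

// follow chases Link to the terminal symbol, compressing the path so
// later lookups are cheap.
func follow(symbols []Symbol, ref Ref) Ref {
	if symbols[ref].Link == InvalidRef {
		return ref
	}
	root := follow(symbols, symbols[ref].Link)
	symbols[ref].Link = root
	return root
}

// merge makes oldRef an alias of newRef so both render with one name.
func merge(symbols []Symbol, oldRef Ref, newRef Ref) {
	o, n := follow(symbols, oldRef), follow(symbols, newRef)
	if o != n {
		symbols[o].Link = n
	}
}

func main() {
	symbols := []Symbol{
		{Name: "foo2", Link: InvalidRef}, // the import site's local name
		{Name: "foo", Link: InvalidRef},  // the exporting file's symbol
	}
	merge(symbols, 0, 1)
	fmt.Println(symbols[follow(symbols, 0)].Name) // foo
}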
- if repr.meta.needsExportSymbolFromRuntime || repr.meta.needsMarkAsModuleSymbolFromRuntime { - runtimeRepr := c.files[runtime.SourceIndex].repr.(*reprJS) - exportPart := &repr.ast.Parts[repr.meta.nsExportPartIndex] - if repr.meta.needsExportSymbolFromRuntime { - exportRef := runtimeRepr.ast.ModuleScope.Members["__export"].Ref - c.generateUseOfSymbolForInclude(exportPart, &repr.meta, 1, exportRef, runtime.SourceIndex) + if repr.Meta.NeedsExportSymbolFromRuntime || repr.Meta.NeedsMarkAsModuleSymbolFromRuntime { + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + if repr.Meta.NeedsExportSymbolFromRuntime { + exportRef := runtimeRepr.AST.ModuleScope.Members["__export"].Ref + c.graph.GenerateSymbolImportAndUse(sourceIndex, repr.Meta.NSExportPartIndex, exportRef, 1, runtime.SourceIndex) } - if repr.meta.needsMarkAsModuleSymbolFromRuntime { - exportRef := runtimeRepr.ast.ModuleScope.Members["__markAsModule"].Ref - c.generateUseOfSymbolForInclude(exportPart, &repr.meta, 1, exportRef, runtime.SourceIndex) + if repr.Meta.NeedsMarkAsModuleSymbolFromRuntime { + markAsModuleRef := runtimeRepr.AST.ModuleScope.Members["__markAsModule"].Ref + c.graph.GenerateSymbolImportAndUse(sourceIndex, repr.Meta.NSExportPartIndex, markAsModuleRef, 1, runtime.SourceIndex) } } - for importRef, importToBind := range repr.meta.importsToBind { - resolvedRepr := c.files[importToBind.sourceIndex].repr.(*reprJS) - partsDeclaringSymbol := resolvedRepr.ast.TopLevelSymbolToParts[importToBind.ref] + for importRef, importData := range repr.Meta.ImportsToBind { + resolvedRepr := c.graph.Files[importData.SourceIndex].InputFile.Repr.(*graph.JSRepr) + partsDeclaringSymbol := resolvedRepr.AST.TopLevelSymbolToParts[importData.Ref] - for _, partIndex := range repr.ast.NamedImports[importRef].LocalPartsWithUses { - partMeta := &repr.meta.partMeta[partIndex] + for _, partIndex := range repr.AST.NamedImports[importRef].LocalPartsWithUses { + part := &repr.AST.Parts[partIndex] + // Depend on the file containing the imported symbol for _, resolvedPartIndex := range partsDeclaringSymbol { - partMeta.nonLocalDependencies = append(partMeta.nonLocalDependencies, partRef{ - sourceIndex: importToBind.sourceIndex, - partIndex: resolvedPartIndex, + part.Dependencies = append(part.Dependencies, js_ast.Dependency{ + SourceIndex: importData.SourceIndex, + PartIndex: resolvedPartIndex, + }) + } + + // Also depend on any files that re-exported this symbol in between the + // file containing the import and the file containing the imported symbol + part.Dependencies = append(part.Dependencies, importData.ReExports...) + } + + // Merge these symbols so they will share the same name + js_ast.MergeSymbols(c.graph.Symbols, importRef, importData.Ref) + } + + // If this is an entry point, depend on all exports so they are included + if file.IsEntryPoint() { + var dependencies []js_ast.Dependency + + for _, alias := range repr.Meta.SortedAndFilteredExportAliases { + export := repr.Meta.ResolvedExports[alias] + targetSourceIndex := export.SourceIndex + targetRef := export.Ref + + // If this is an import, then target what the import points to + targetRepr := c.graph.Files[targetSourceIndex].InputFile.Repr.(*graph.JSRepr) + if importData, ok := targetRepr.Meta.ImportsToBind[targetRef]; ok { + targetSourceIndex = importData.SourceIndex + targetRef = importData.Ref + targetRepr = c.graph.Files[targetSourceIndex].InputFile.Repr.(*graph.JSRepr) + dependencies = append(dependencies, importData.ReExports...) 
+ } + + // Pull in all declarations of this symbol + for _, partIndex := range targetRepr.AST.TopLevelSymbolToParts[targetRef] { + dependencies = append(dependencies, js_ast.Dependency{ + SourceIndex: targetSourceIndex, + PartIndex: partIndex, }) } } - // Merge these symbols so they will share the same name - js_ast.MergeSymbols(c.symbols, importRef, importToBind.ref) + // Ensure "exports" is included if the current output format needs it + if repr.Meta.ForceIncludeExportsForEntryPoint { + dependencies = append(dependencies, js_ast.Dependency{ + SourceIndex: sourceIndex, + PartIndex: repr.Meta.NSExportPartIndex, + }) + } + + // Include the wrapper if present + if repr.Meta.Wrap != graph.WrapNone { + dependencies = append(dependencies, js_ast.Dependency{ + SourceIndex: sourceIndex, + PartIndex: repr.Meta.WrapperPartIndex.GetIndex(), + }) + } + + // Represent these constraints with a dummy part + entryPointPartIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{ + Dependencies: dependencies, + CanBeRemovedIfUnused: false, + }) + repr.Meta.EntryPointPartIndex = ast.MakeIndex32(entryPointPartIndex) + } + + // Encode import-specific constraints in the dependency graph + for partIndex, part := range repr.AST.Parts { + toModuleUses := uint32(0) + + // Imports of wrapped files must depend on the wrapper + for _, importRecordIndex := range part.ImportRecordIndices { + record := &repr.AST.ImportRecords[importRecordIndex] + + // Don't follow external imports (this includes import() expressions) + if !record.SourceIndex.IsValid() || c.isExternalDynamicImport(record, sourceIndex) { + // This is an external import, so it needs the "__toModule" wrapper as + // long as it's not a bare "require()" + if record.Kind != ast.ImportRequire && (!c.options.OutputFormat.KeepES6ImportExportSyntax() || + (record.Kind == ast.ImportDynamic && c.options.UnsupportedJSFeatures.Has(compat.DynamicImport))) { + record.WrapWithToModule = true + toModuleUses++ + } + continue + } + + otherSourceIndex := record.SourceIndex.GetIndex() + otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr) + + if otherRepr.Meta.Wrap != graph.WrapNone { + // Depend on the automatically-generated require wrapper symbol + wrapperRef := otherRepr.AST.WrapperRef + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), wrapperRef, 1, otherSourceIndex) + + // This is an ES6 import of a CommonJS module, so it needs the + // "__toModule" wrapper as long as it's not a bare "require()" + if record.Kind != ast.ImportRequire && otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS { + record.WrapWithToModule = true + toModuleUses++ + } + + // If this is an ESM wrapper, also depend on the exports object + // since the final code will contain an inline reference to it. + // This must be done for "require()" and "import()" expressions + // but does not need to be done for "import" statements since + // those just cause us to reference the exports directly. + if otherRepr.Meta.Wrap == graph.WrapESM && record.Kind != ast.ImportStmt { + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex) + } + } else if record.Kind == ast.ImportStmt && otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback { + // This is an import of a module that has a dynamic export fallback + // object. In that case we need to depend on that object in case + // something ends up needing to use it later. 
This could potentially + // be omitted in some cases with more advanced analysis if this + // dynamic export fallback object doesn't end up being needed. + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex) + } + } + + // If there's an ES6 import of a non-ES6 module, then we're going to need the + // "__toModule" symbol from the runtime to wrap the result of "require()" + c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__toModule", toModuleUses) + + // If there's an ES6 export star statement of a non-ES6 module, then we're + // going to need the "__reExport" symbol from the runtime + reExportUses := uint32(0) + for _, importRecordIndex := range repr.AST.ExportStarImportRecords { + record := &repr.AST.ImportRecords[importRecordIndex] + + // Is this export star evaluated at run time? + happensAtRunTime := !record.SourceIndex.IsValid() && (!file.IsEntryPoint() || !c.options.OutputFormat.KeepES6ImportExportSyntax()) + if record.SourceIndex.IsValid() { + otherSourceIndex := record.SourceIndex.GetIndex() + otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr) + if otherSourceIndex != sourceIndex && otherRepr.AST.ExportsKind.IsDynamic() { + happensAtRunTime = true + } + if otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback { + // This looks like "__reExport(exports_a, exports_b)". Make sure to + // pull in the "exports_b" symbol into this export star. This matters + // in code splitting situations where the "export_b" symbol might live + // in a different chunk than this export star. + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex) + } + } + if happensAtRunTime { + // Depend on this file's "exports" object for the first argument to "__reExport" + c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), repr.AST.ExportsRef, 1, sourceIndex) + record.CallsRunTimeReExportFn = true + repr.AST.UsesExportsRef = true + reExportUses++ + } + } + c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__reExport", reExportUses) } } } func (c *linkerContext) generateCodeForLazyExport(sourceIndex uint32) { - file := &c.files[sourceIndex] - repr := file.repr.(*reprJS) + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) // Grab the lazy expression - if len(repr.ast.Parts) < 1 { + if len(repr.AST.Parts) < 1 { panic("Internal error") } - part := &repr.ast.Parts[0] + part := &repr.AST.Parts[0] if len(part.Stmts) != 1 { panic("Internal error") } @@ -1371,17 +1455,16 @@ func (c *linkerContext) generateCodeForLazyExport(sourceIndex uint32) { } // Use "module.exports = value" for CommonJS-style modules - if repr.meta.cjsStyleExports { + if repr.AST.ExportsKind == js_ast.ExportsCommonJS { part.Stmts = []js_ast.Stmt{js_ast.AssignStmt( js_ast.Expr{Loc: lazy.Value.Loc, Data: &js_ast.EDot{ - Target: js_ast.Expr{Loc: lazy.Value.Loc, Data: &js_ast.EIdentifier{Ref: repr.ast.ModuleRef}}, + Target: js_ast.Expr{Loc: lazy.Value.Loc, Data: &js_ast.EIdentifier{Ref: repr.AST.ModuleRef}}, Name: "exports", NameLoc: lazy.Value.Loc, }}, lazy.Value, )} - part.SymbolUses[repr.ast.ModuleRef] = js_ast.SymbolUse{CountEstimate: 1} - repr.ast.UsesModuleRef = true + c.graph.GenerateSymbolImportAndUse(sourceIndex, 0, repr.AST.ModuleRef, 1, sourceIndex) return } @@ -1394,12 +1477,9 @@ func (c *linkerContext) generateCodeForLazyExport(sourceIndex uint32) { partIndex uint32 } - generateExport := 
func(name string, alias string, value js_ast.Expr, prevExports []prevExport) prevExport { + generateExport := func(name string, alias string, value js_ast.Expr) prevExport { // Generate a new symbol - inner := &c.symbols.Outer[sourceIndex] - ref := js_ast.Ref{OuterIndex: sourceIndex, InnerIndex: uint32(len(*inner))} - *inner = append(*inner, js_ast.Symbol{Kind: js_ast.SymbolOther, OriginalName: name, Link: js_ast.InvalidRef}) - repr.ast.ModuleScope.Generated = append(repr.ast.ModuleScope.Generated, ref) + ref := c.graph.GenerateNewSymbol(sourceIndex, js_ast.SymbolOther, name) // Generate an ES6 export var stmt js_ast.Stmt @@ -1419,41 +1499,40 @@ func (c *linkerContext) generateCodeForLazyExport(sourceIndex uint32) { } // Link the export into the graph for tree shaking - partIndex := c.addPartToFile(sourceIndex, js_ast.Part{ + partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{ Stmts: []js_ast.Stmt{stmt}, - SymbolUses: map[js_ast.Ref]js_ast.SymbolUse{repr.ast.ModuleRef: {CountEstimate: 1}}, DeclaredSymbols: []js_ast.DeclaredSymbol{{Ref: ref, IsTopLevel: true}}, CanBeRemovedIfUnused: true, - }, partMeta{}) - repr.ast.TopLevelSymbolToParts[ref] = []uint32{partIndex} - repr.meta.resolvedExports[alias] = exportData{ref: ref, sourceIndex: sourceIndex} - part := &repr.ast.Parts[partIndex] - for _, export := range prevExports { - part.SymbolUses[export.ref] = js_ast.SymbolUse{CountEstimate: 1} - part.LocalDependencies[export.partIndex] = true - } + }) + c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, repr.AST.ModuleRef, 1, sourceIndex) + repr.Meta.ResolvedExports[alias] = graph.ExportData{Ref: ref, SourceIndex: sourceIndex} return prevExport{ref: ref, partIndex: partIndex} } // Unwrap JSON objects into separate top-level variables - var prevExports []prevExport + var prevExports []js_ast.Ref jsonValue := lazy.Value if object, ok := jsonValue.Data.(*js_ast.EObject); ok { clone := *object clone.Properties = append(make([]js_ast.Property, 0, len(clone.Properties)), clone.Properties...) for i, property := range clone.Properties { - if str, ok := property.Key.Data.(*js_ast.EString); ok && (!file.isEntryPoint || js_lexer.IsIdentifierUTF16(str.Value)) { + if str, ok := property.Key.Data.(*js_ast.EString); ok && (!file.IsEntryPoint() || js_lexer.IsIdentifierUTF16(str.Value)) { name := js_lexer.UTF16ToString(str.Value) - export := generateExport(name, name, *property.Value, nil) - prevExports = append(prevExports, export) - clone.Properties[i].Value = &js_ast.Expr{Loc: property.Key.Loc, Data: &js_ast.EIdentifier{Ref: export.ref}} + exportRef := generateExport(name, name, *property.Value).ref + prevExports = append(prevExports, exportRef) + clone.Properties[i].Value = &js_ast.Expr{Loc: property.Key.Loc, Data: &js_ast.EIdentifier{Ref: exportRef}} } } jsonValue.Data = &clone } // Generate the default export - generateExport(file.source.IdentifierName+"_default", "default", jsonValue, prevExports) + finalExportPartIndex := generateExport(file.InputFile.Source.IdentifierName+"_default", "default", jsonValue).partIndex + + // The default export depends on all of the previous exports + for _, exportRef := range prevExports { + c.graph.GenerateSymbolImportAndUse(sourceIndex, finalExportPartIndex, exportRef, 1, sourceIndex) + } } func (c *linkerContext) createExportsForFile(sourceIndex uint32) { @@ -1462,139 +1541,33 @@ func (c *linkerContext) createExportsForFile(sourceIndex uint32) { // for other files within this method or you will create a data race. 
//////////////////////////////////////////////////////////////////////////////// - var entryPointES6ExportItems []js_ast.ClauseItem - var entryPointExportStmts []js_ast.Stmt - file := &c.files[sourceIndex] - repr := file.repr.(*reprJS) - - // If the output format is ES6 modules and we're an entry point, generate an - // ES6 export statement containing all exports. Except don't do that if this - // entry point is a CommonJS-style module, since that would generate an ES6 - // export statement that's not top-level. Instead, we will export the CommonJS - // exports as a default export later on. - needsEntryPointES6ExportPart := file.isEntryPoint && !repr.meta.cjsWrap && - c.options.OutputFormat == config.FormatESModule && len(repr.meta.sortedAndFilteredExportAliases) > 0 + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) // Generate a getter per export properties := []js_ast.Property{} - nsExportNonLocalDependencies := []partRef{} - entryPointExportNonLocalDependencies := []partRef{} + nsExportDependencies := []js_ast.Dependency{} nsExportSymbolUses := make(map[js_ast.Ref]js_ast.SymbolUse) - entryPointExportSymbolUses := make(map[js_ast.Ref]js_ast.SymbolUse) - for _, alias := range repr.meta.sortedAndFilteredExportAliases { - export := repr.meta.resolvedExports[alias] + for _, alias := range repr.Meta.SortedAndFilteredExportAliases { + export := repr.Meta.ResolvedExports[alias] // If this is an export of an import, reference the symbol that the import // was eventually resolved to. We need to do this because imports have // already been resolved by this point, so we can't generate a new import // and have that be resolved later. - if importToBind, ok := c.files[export.sourceIndex].repr.(*reprJS).meta.importsToBind[export.ref]; ok { - export.ref = importToBind.ref - export.sourceIndex = importToBind.sourceIndex + if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[export.Ref]; ok { + export.Ref = importData.Ref + export.SourceIndex = importData.SourceIndex + nsExportDependencies = append(nsExportDependencies, importData.ReExports...) } // Exports of imports need EImportIdentifier in case they need to be re- // written to a property access later on var value js_ast.Expr - if c.symbols.Get(export.ref).NamespaceAlias != nil { - value = js_ast.Expr{Data: &js_ast.EImportIdentifier{Ref: export.ref}} - - // Imported identifiers must be assigned to a local variable to be - // exported using an ES6 export clause. The import needs to be an - // EImportIdentifier in case it's imported from a CommonJS module. - if needsEntryPointES6ExportPart { - // Generate a temporary variable - inner := &c.symbols.Outer[sourceIndex] - tempRef := js_ast.Ref{OuterIndex: sourceIndex, InnerIndex: uint32(len(*inner))} - *inner = append(*inner, js_ast.Symbol{ - Kind: js_ast.SymbolOther, - OriginalName: "export_" + alias, - Link: js_ast.InvalidRef, - }) - - // Stick it on the module scope so it gets renamed and minified - generated := &repr.ast.ModuleScope.Generated - *generated = append(*generated, tempRef) - - // Create both a local variable and an export clause for that variable. - // The local variable is initialized with the initial value of the - // export. This isn't fully correct because it's a "dead" binding and - // doesn't update with the "live" value as it changes. But ES6 modules - // don't have any syntax for bare named getter functions so this is the - // best we can do. 
- // - // These input files: - // - // // entry_point.js - // export {foo} from './cjs-format.js' - // - // // cjs-format.js - // Object.defineProperty(exports, 'foo', { - // enumerable: true, - // get: () => Math.random(), - // }) - // - // Become this output file: - // - // // cjs-format.js - // var require_cjs_format = __commonJS((exports) => { - // Object.defineProperty(exports, "foo", { - // enumerable: true, - // get: () => Math.random() - // }); - // }); - // - // // entry_point.js - // var cjs_format = __toModule(require_cjs_format()); - // var export_foo = cjs_format.foo; - // export { - // export_foo as foo - // }; - // - entryPointExportStmts = append(entryPointExportStmts, js_ast.Stmt{Data: &js_ast.SLocal{ - Decls: []js_ast.Decl{{ - Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: tempRef}}, - Value: &js_ast.Expr{Data: &js_ast.EImportIdentifier{Ref: export.ref}}, - }}, - }}) - entryPointES6ExportItems = append(entryPointES6ExportItems, js_ast.ClauseItem{ - Name: js_ast.LocRef{Ref: tempRef}, - Alias: alias, - }) - entryPointExportSymbolUses[tempRef] = js_ast.SymbolUse{CountEstimate: 2} - } + if c.graph.Symbols.Get(export.Ref).NamespaceAlias != nil { + value = js_ast.Expr{Data: &js_ast.EImportIdentifier{Ref: export.Ref}} } else { - value = js_ast.Expr{Data: &js_ast.EIdentifier{Ref: export.ref}} - - if needsEntryPointES6ExportPart { - // Local identifiers can be exported using an export clause. This is done - // this way instead of leaving the "export" keyword on the local declaration - // itself both because it lets the local identifier be minified and because - // it works transparently for re-exports across files. - // - // These input files: - // - // // entry_point.js - // export * from './esm-format.js' - // - // // esm-format.js - // export let foo = 123 - // - // Become this output file: - // - // // esm-format.js - // let foo = 123; - // - // // entry_point.js - // export { - // foo - // }; - // - entryPointES6ExportItems = append(entryPointES6ExportItems, js_ast.ClauseItem{ - Name: js_ast.LocRef{Ref: export.ref}, - Alias: alias, - }) - } + value = js_ast.Expr{Data: &js_ast.EIdentifier{Ref: export.Ref}} } // Add a getter property @@ -1609,33 +1582,29 @@ func (c *linkerContext) createExportsForFile(sourceIndex uint32) { Key: js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(alias)}}, Value: &getter, }) - nsExportSymbolUses[export.ref] = js_ast.SymbolUse{CountEstimate: 1} - if file.isEntryPoint { - entryPointExportSymbolUses[export.ref] = js_ast.SymbolUse{CountEstimate: 1} - } + nsExportSymbolUses[export.Ref] = js_ast.SymbolUse{CountEstimate: 1} // Make sure the part that declares the export is included - for _, partIndex := range c.files[export.sourceIndex].repr.(*reprJS).ast.TopLevelSymbolToParts[export.ref] { + for _, partIndex := range c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.TopLevelSymbolToParts[export.Ref] { // Use a non-local dependency since this is likely from a different // file if it came in through an export star - dep := partRef{sourceIndex: export.sourceIndex, partIndex: partIndex} - nsExportNonLocalDependencies = append(nsExportNonLocalDependencies, dep) - if file.isEntryPoint { - entryPointExportNonLocalDependencies = append(entryPointExportNonLocalDependencies, dep) - } + nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{ + SourceIndex: export.SourceIndex, + PartIndex: partIndex, + }) } } // Prefix this part with "var exports = {}" if this isn't a CommonJS module declaredSymbols := 
[]js_ast.DeclaredSymbol{}
 	var nsExportStmts []js_ast.Stmt
-	if !repr.meta.cjsStyleExports && (!file.isEntryPoint || c.options.OutputFormat != config.FormatCommonJS) {
+	if repr.AST.ExportsKind != js_ast.ExportsCommonJS && (!file.IsEntryPoint() || c.options.OutputFormat != config.FormatCommonJS) {
 		nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SLocal{Decls: []js_ast.Decl{{
-			Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.ast.ExportsRef}},
+			Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.ExportsRef}},
 			Value:   &js_ast.Expr{Data: &js_ast.EObject{}},
 		}}}})
 		declaredSymbols = append(declaredSymbols, js_ast.DeclaredSymbol{
-			Ref:        repr.ast.ExportsRef,
+			Ref:        repr.AST.ExportsRef,
 			IsTopLevel: true,
 		})
 	}
@@ -1644,33 +1613,38 @@ func (c *linkerContext) createExportsForFile(sourceIndex uint32) {
 	// "__markAsModule" which sets the "__esModule" property to true. This must
 	// be done before any calls to "require()", or circular imports of multiple
 	// modules that have each been converted from ESM to CommonJS may not work
 	// correctly.
-	if repr.ast.HasES6Exports && (repr.meta.cjsStyleExports || (file.isEntryPoint && c.options.OutputFormat == config.FormatCommonJS)) {
-		runtimeRepr := c.files[runtime.SourceIndex].repr.(*reprJS)
-		markAsModuleRef := runtimeRepr.ast.ModuleScope.Members["__markAsModule"].Ref
+	if repr.AST.ExportKeyword.Len > 0 && (repr.AST.ExportsKind == js_ast.ExportsCommonJS ||
+		(file.IsEntryPoint() && c.options.OutputFormat == config.FormatCommonJS)) {
+		runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
+		markAsModuleRef := runtimeRepr.AST.ModuleScope.Members["__markAsModule"].Ref
 		nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{
 			Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: markAsModuleRef}},
-			Args:   []js_ast.Expr{{Data: &js_ast.EIdentifier{Ref: repr.ast.ExportsRef}}},
+			Args:   []js_ast.Expr{{Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}},
 		}}}})

 		// Make sure this file depends on the "__markAsModule" symbol
-		for _, partIndex := range runtimeRepr.ast.TopLevelSymbolToParts[markAsModuleRef] {
-			dep := partRef{sourceIndex: runtime.SourceIndex, partIndex: partIndex}
-			nsExportNonLocalDependencies = append(nsExportNonLocalDependencies, dep)
+		for _, partIndex := range runtimeRepr.AST.TopLevelSymbolToParts[markAsModuleRef] {
+			nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{
+				SourceIndex: runtime.SourceIndex,
+				PartIndex:   partIndex,
+			})
 		}

-		// Pull in the "__markAsModule" symbol later
+		// Pull in the "__markAsModule" symbol later. Also make sure the "exports"
+		// variable is marked as used because we used it above.
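+		// ("__esModule" is the usual interop marker: require()-style consumers
+		// check it to decide whether "default" refers to the real default
+		// export or to the whole exports object, which is why the marking has
+		// to happen before any such consumer can observe the exports.)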
+ repr.Meta.NeedsMarkAsModuleSymbolFromRuntime = true + repr.AST.UsesExportsRef = true } // "__export(exports, { foo: () => foo })" exportRef := js_ast.InvalidRef if len(properties) > 0 { - runtimeRepr := c.files[runtime.SourceIndex].repr.(*reprJS) - exportRef = runtimeRepr.ast.ModuleScope.Members["__export"].Ref + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + exportRef = runtimeRepr.AST.ModuleScope.Members["__export"].Ref nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: exportRef}}, Args: []js_ast.Expr{ - {Data: &js_ast.EIdentifier{Ref: repr.ast.ExportsRef}}, + {Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}, {Data: &js_ast.EObject{ Properties: properties, }}, @@ -1678,124 +1652,131 @@ func (c *linkerContext) createExportsForFile(sourceIndex uint32) { }}}}) // Make sure this file depends on the "__export" symbol - for _, partIndex := range runtimeRepr.ast.TopLevelSymbolToParts[exportRef] { - dep := partRef{sourceIndex: runtime.SourceIndex, partIndex: partIndex} - nsExportNonLocalDependencies = append(nsExportNonLocalDependencies, dep) + for _, partIndex := range runtimeRepr.AST.TopLevelSymbolToParts[exportRef] { + nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{ + SourceIndex: runtime.SourceIndex, + PartIndex: partIndex, + }) } // Make sure the CommonJS closure, if there is one, includes "exports" - repr.ast.UsesExportsRef = true + repr.AST.UsesExportsRef = true } // No need to generate a part if it'll be empty if len(nsExportStmts) > 0 { // Initialize the part that was allocated for us earlier. The information // here will be used after this during tree shaking. - exportPart := &repr.ast.Parts[repr.meta.nsExportPartIndex] - *exportPart = js_ast.Part{ - Stmts: nsExportStmts, - LocalDependencies: make(map[uint32]bool), - SymbolUses: nsExportSymbolUses, - DeclaredSymbols: declaredSymbols, + repr.AST.Parts[repr.Meta.NSExportPartIndex] = js_ast.Part{ + Stmts: nsExportStmts, + SymbolUses: nsExportSymbolUses, + Dependencies: nsExportDependencies, + DeclaredSymbols: declaredSymbols, - // This can be removed if nothing uses it. Except if we're a CommonJS - // module, in which case it's always necessary. 
-			CanBeRemovedIfUnused: !repr.meta.cjsStyleExports,
-
-			// Put the export definitions first before anything else gets evaluated
-			IsNamespaceExport: true,
+			// This can be removed if nothing uses it
+			CanBeRemovedIfUnused: true,

 			// Make sure this is trimmed if unused even if tree shaking is disabled
 			ForceTreeShaking: true,
 		}
-		repr.meta.partMeta[repr.meta.nsExportPartIndex].nonLocalDependencies = nsExportNonLocalDependencies

 		// Pull in the "__export" symbol if it was used
 		if exportRef != js_ast.InvalidRef {
-			repr.meta.needsExportSymbolFromRuntime = true
+			repr.Meta.NeedsExportSymbolFromRuntime = true
 		}
 	}
+}

-	if len(entryPointES6ExportItems) > 0 {
-		entryPointExportStmts = append(entryPointExportStmts,
-			js_ast.Stmt{Data: &js_ast.SExportClause{Items: entryPointES6ExportItems}})
-	}
+func (c *linkerContext) createWrapperForFile(sourceIndex uint32) {
+	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)

-	// If we're an entry point, call the require function at the end of the
-	// bundle right before bundle evaluation ends
-	var cjsWrapStmt js_ast.Stmt
-	if file.isEntryPoint {
-		if repr.meta.cjsWrap {
-			switch c.options.OutputFormat {
-			case config.FormatPreserve:
-				// "require_foo();"
-				cjsWrapStmt = js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{
-					Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.ast.WrapperRef}},
-				}}}}
+	switch repr.Meta.Wrap {
+	// If this is a CommonJS file, we're going to need to generate a wrapper
+	// for the CommonJS closure. That will end up looking something like this:
+	//
+	//   var require_foo = __commonJS((exports, module) => {
+	//     ...
+	//   });
+	//
+	// However, that generation is special-cased for various reasons and is
+	// done later on. Still, we're going to need to ensure that this file
+	// both depends on the "__commonJS" symbol and declares the "require_foo"
+	// symbol. Instead of special-casing this during the reachability analysis
+	// below, we just append a dummy part to the end of the file with these
+	// dependencies and let the general-purpose reachability analysis take care
+	// of it.
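+	// As a sketch of the semantics (the actual helper lives in the runtime
+	// and may differ in detail): "__commonJS" turns the module body into a
+	// lazily-evaluated require function that caches "module.exports", so
+	// repeated calls only run the body once:
+	//
+	//   var require_foo = __commonJS((exports, module) => { /* body */ });
+	//   var a = require_foo(); // evaluates the body
+	//   var b = require_foo(); // returns the cached exports
+	//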
+ case graph.WrapCJS: + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + commonJSRef := runtimeRepr.AST.NamedExports["__commonJS"].Ref + commonJSParts := runtimeRepr.AST.TopLevelSymbolToParts[commonJSRef] - case config.FormatIIFE: - if len(c.options.GlobalName) > 0 { - // "return require_foo();" - cjsWrapStmt = js_ast.Stmt{Data: &js_ast.SReturn{Value: &js_ast.Expr{Data: &js_ast.ECall{ - Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.ast.WrapperRef}}, - }}}} - } else { - // "require_foo();" - cjsWrapStmt = js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ - Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.ast.WrapperRef}}, - }}}} - } - - case config.FormatCommonJS: - // "module.exports = require_foo();" - cjsWrapStmt = js_ast.AssignStmt( - js_ast.Expr{Data: &js_ast.EDot{ - Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.unboundModuleRef}}, - Name: "exports", - }}, - js_ast.Expr{Data: &js_ast.ECall{ - Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.ast.WrapperRef}}, - }}, - ) - - case config.FormatESModule: - // "export default require_foo();" - cjsWrapStmt = js_ast.Stmt{Data: &js_ast.SExportDefault{Value: js_ast.ExprOrStmt{Expr: &js_ast.Expr{Data: &js_ast.ECall{ - Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.ast.WrapperRef}}, - }}}}} + // Generate the dummy part + dependencies := make([]js_ast.Dependency, len(commonJSParts)) + for i, partIndex := range commonJSParts { + dependencies[i] = js_ast.Dependency{ + SourceIndex: runtime.SourceIndex, + PartIndex: partIndex, } - } else if repr.meta.forceIncludeExportsForEntryPoint && c.options.OutputFormat == config.FormatIIFE && len(c.options.GlobalName) > 0 { - // "return exports;" - cjsWrapStmt = js_ast.Stmt{Data: &js_ast.SReturn{Value: &js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.ast.ExportsRef}}}} } - } - - if len(entryPointExportStmts) > 0 || cjsWrapStmt.Data != nil { - // Trigger evaluation of the CommonJS wrapper - if cjsWrapStmt.Data != nil { - entryPointExportSymbolUses[repr.ast.WrapperRef] = js_ast.SymbolUse{CountEstimate: 1} - entryPointExportStmts = append(entryPointExportStmts, cjsWrapStmt) - } - - // Add a part for this export clause - partIndex := c.addPartToFile(sourceIndex, js_ast.Part{ - Stmts: entryPointExportStmts, - SymbolUses: entryPointExportSymbolUses, - }, partMeta{ - nonLocalDependencies: append([]partRef{}, entryPointExportNonLocalDependencies...), + partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{ + SymbolUses: map[js_ast.Ref]js_ast.SymbolUse{ + repr.AST.WrapperRef: {CountEstimate: 1}, + }, + DeclaredSymbols: []js_ast.DeclaredSymbol{ + {Ref: repr.AST.ExportsRef, IsTopLevel: true}, + {Ref: repr.AST.ModuleRef, IsTopLevel: true}, + {Ref: repr.AST.WrapperRef, IsTopLevel: true}, + }, + Dependencies: dependencies, }) - repr.meta.entryPointExportPartIndex = &partIndex + repr.Meta.WrapperPartIndex = ast.MakeIndex32(partIndex) + c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, commonJSRef, 1, runtime.SourceIndex) + + // If this is a lazily-initialized ESM file, we're going to need to + // generate a wrapper for the ESM closure. That will end up looking + // something like this: + // + // var init_foo = __esm(() => { + // ... + // }); + // + // This depends on the "__esm" symbol and declares the "init_foo" symbol + // for similar reasons to the CommonJS closure above. 
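+	// Roughly, and again only as a sketch: "__esm" wraps the body in an init
+	// function that runs at most once, while the exports themselves remain
+	// ordinary top-level bindings that the rest of the bundle can reference
+	// directly:
+	//
+	//   var init_foo = __esm(() => { /* body */ });
+	//   init_foo(); // safe to call many times; the body runs only once
+	//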
+ case graph.WrapESM: + runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr) + esmRef := runtimeRepr.AST.NamedExports["__esm"].Ref + esmParts := runtimeRepr.AST.TopLevelSymbolToParts[esmRef] + + // Generate the dummy part + dependencies := make([]js_ast.Dependency, len(esmParts)) + for i, partIndex := range esmParts { + dependencies[i] = js_ast.Dependency{ + SourceIndex: runtime.SourceIndex, + PartIndex: partIndex, + } + } + partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{ + SymbolUses: map[js_ast.Ref]js_ast.SymbolUse{ + repr.AST.WrapperRef: {CountEstimate: 1}, + }, + DeclaredSymbols: []js_ast.DeclaredSymbol{ + {Ref: repr.AST.WrapperRef, IsTopLevel: true}, + }, + Dependencies: dependencies, + }) + repr.Meta.WrapperPartIndex = ast.MakeIndex32(partIndex) + c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, esmRef, 1, runtime.SourceIndex) } } func (c *linkerContext) matchImportsWithExportsForFile(sourceIndex uint32) { - file := &c.files[sourceIndex] - repr := file.repr.(*reprJS) + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) // Sort imports for determinism. Otherwise our unit tests will randomly // fail sometimes when error messages are reordered. - sortedImportRefs := make([]int, 0, len(repr.ast.NamedImports)) - for ref := range repr.ast.NamedImports { + sortedImportRefs := make([]int, 0, len(repr.AST.NamedImports)) + for ref := range repr.AST.NamedImports { sortedImportRefs = append(sortedImportRefs, int(ref.InnerIndex)) } sort.Ints(sortedImportRefs) @@ -1805,56 +1786,60 @@ func (c *linkerContext) matchImportsWithExportsForFile(sourceIndex uint32) { // Re-use memory for the cycle detector c.cycleDetector = c.cycleDetector[:0] - importRef := js_ast.Ref{OuterIndex: sourceIndex, InnerIndex: uint32(innerIndex)} - result := c.matchImportWithExport(importTracker{sourceIndex: sourceIndex, importRef: importRef}) + importRef := js_ast.Ref{SourceIndex: sourceIndex, InnerIndex: uint32(innerIndex)} + result, reExports := c.matchImportWithExport(importTracker{sourceIndex: sourceIndex, importRef: importRef}, nil) switch result.kind { + case matchImportIgnore: + case matchImportNormal: - repr.meta.importsToBind[importRef] = importToBind{ - sourceIndex: result.sourceIndex, - ref: result.ref, + repr.Meta.ImportsToBind[importRef] = graph.ImportData{ + ReExports: reExports, + SourceIndex: result.sourceIndex, + Ref: result.ref, } case matchImportNamespace: - c.symbols.Get(importRef).NamespaceAlias = &js_ast.NamespaceAlias{ + c.graph.Symbols.Get(importRef).NamespaceAlias = &js_ast.NamespaceAlias{ NamespaceRef: result.namespaceRef, Alias: result.alias, } case matchImportNormalAndNamespace: - repr.meta.importsToBind[importRef] = importToBind{ - sourceIndex: result.sourceIndex, - ref: result.ref, + repr.Meta.ImportsToBind[importRef] = graph.ImportData{ + ReExports: reExports, + SourceIndex: result.sourceIndex, + Ref: result.ref, } - c.symbols.Get(importRef).NamespaceAlias = &js_ast.NamespaceAlias{ + c.graph.Symbols.Get(importRef).NamespaceAlias = &js_ast.NamespaceAlias{ NamespaceRef: result.namespaceRef, Alias: result.alias, } case matchImportCycle: - namedImport := repr.ast.NamedImports[importRef] - c.addRangeError(file.source, js_lexer.RangeOfIdentifier(file.source, namedImport.AliasLoc), + namedImport := repr.AST.NamedImports[importRef] + c.log.AddRangeError(&file.InputFile.Source, js_lexer.RangeOfIdentifier(file.InputFile.Source, namedImport.AliasLoc), fmt.Sprintf("Detected cycle while resolving import %q", namedImport.Alias)) 
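+			// (A hypothetical input pair that triggers the cycle error above:
+			//
+			//   // a.js
+			//   export {x} from "./b.js"
+			//
+			//   // b.js
+			//   export {x} from "./a.js"
+			//
+			// Resolving "x" bounces between the two files forever, which the
+			// cycle detector turns into this error.)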
case matchImportProbablyTypeScriptType: - repr.meta.isProbablyTypeScriptType[importRef] = true + repr.Meta.IsProbablyTypeScriptType[importRef] = true case matchImportAmbiguous: - namedImport := repr.ast.NamedImports[importRef] - r := js_lexer.RangeOfIdentifier(file.source, namedImport.AliasLoc) + namedImport := repr.AST.NamedImports[importRef] + r := js_lexer.RangeOfIdentifier(file.InputFile.Source, namedImport.AliasLoc) var notes []logger.MsgData // Provide the locations of both ambiguous exports if possible if result.nameLoc.Start != 0 && result.otherNameLoc.Start != 0 { - a := c.files[result.sourceIndex].source - b := c.files[result.otherSourceIndex].source + a := c.graph.Files[result.sourceIndex].InputFile.Source + b := c.graph.Files[result.otherSourceIndex].InputFile.Source notes = []logger.MsgData{ logger.RangeData(&a, js_lexer.RangeOfIdentifier(a, result.nameLoc), "One matching export is here"), logger.RangeData(&b, js_lexer.RangeOfIdentifier(b, result.otherNameLoc), "Another matching export is here"), } } - symbol := c.symbols.Get(importRef) + symbol := c.graph.Symbols.Get(importRef) if symbol.ImportItemStatus == js_ast.ImportItemGenerated { // This is a warning instead of an error because although it appears // to be a named import, it's actually an automatically-generated @@ -1865,10 +1850,10 @@ func (c *linkerContext) matchImportsWithExportsForFile(sourceIndex uint32) { // "undefined" instead of emitting an error. symbol.ImportItemStatus = js_ast.ImportItemMissing msg := fmt.Sprintf("Import %q will always be undefined because there are multiple matching exports", namedImport.Alias) - c.log.AddRangeWarningWithNotes(&file.source, r, msg, notes) + c.log.AddRangeWarningWithNotes(&file.InputFile.Source, r, msg, notes) } else { msg := fmt.Sprintf("Ambiguous import %q has multiple matching exports", namedImport.Alias) - c.addRangeErrorWithNotes(file.source, r, msg, notes) + c.log.AddRangeErrorWithNotes(&file.InputFile.Source, r, msg, notes) } } } @@ -1910,8 +1895,11 @@ type matchImportResult struct { ref js_ast.Ref } -func (c *linkerContext) matchImportWithExport(tracker importTracker) (result matchImportResult) { +func (c *linkerContext) matchImportWithExport( + tracker importTracker, reExportsIn []js_ast.Dependency, +) (result matchImportResult, reExports []js_ast.Dependency) { var ambiguousResults []matchImportResult + reExports = reExportsIn loop: for { @@ -1947,8 +1935,8 @@ loop: // property access. Don't do this if the namespace reference is invalid // though. This is the case for star imports, where the import is the // namespace. 
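+			// For example (names here are illustrative), "import {foo} from
+			// './cjs.js'" where cjs.js is CommonJS ends up as a property
+			// access off of the __toModule()-wrapped require result:
+			//
+			//   var cjs = __toModule(require_cjs());
+			//   console.log(cjs.foo);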
- trackerFile := &c.files[tracker.sourceIndex] - namedImport := trackerFile.repr.(*reprJS).ast.NamedImports[tracker.importRef] + trackerFile := &c.graph.Files[tracker.sourceIndex] + namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef] if namedImport.NamespaceRef != js_ast.InvalidRef { if result.kind == matchImportNormal { result.kind = matchImportNormalAndNamespace @@ -1965,19 +1953,35 @@ loop: // Warn about importing from a file that is known to not have any exports if status == importCommonJSWithoutExports { - source := trackerFile.source - symbol := c.symbols.Get(tracker.importRef) + source := trackerFile.InputFile.Source + symbol := c.graph.Symbols.Get(tracker.importRef) symbol.ImportItemStatus = js_ast.ImportItemMissing c.log.AddRangeWarning(&source, js_lexer.RangeOfIdentifier(source, namedImport.AliasLoc), fmt.Sprintf("Import %q will always be undefined because the file %q has no exports", - namedImport.Alias, c.files[nextTracker.sourceIndex].source.PrettyPath)) + namedImport.Alias, c.graph.Files[nextTracker.sourceIndex].InputFile.Source.PrettyPath)) + } + + case importDynamicFallback: + // If it's a file with dynamic export fallback, rewrite the import to a property access + trackerFile := &c.graph.Files[tracker.sourceIndex] + namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef] + if result.kind == matchImportNormal { + result.kind = matchImportNormalAndNamespace + result.namespaceRef = nextTracker.importRef + result.alias = namedImport.Alias + } else { + result = matchImportResult{ + kind: matchImportNamespace, + namespaceRef: nextTracker.importRef, + alias: namedImport.Alias, + } } case importNoMatch: - symbol := c.symbols.Get(tracker.importRef) - trackerFile := &c.files[tracker.sourceIndex] - source := trackerFile.source - namedImport := trackerFile.repr.(*reprJS).ast.NamedImports[tracker.importRef] + symbol := c.graph.Symbols.Get(tracker.importRef) + trackerFile := &c.graph.Files[tracker.sourceIndex] + source := trackerFile.InputFile.Source + namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef] r := js_lexer.RangeOfIdentifier(source, namedImport.AliasLoc) // Report mismatched imports and exports @@ -1990,9 +1994,11 @@ loop: // time, so we emit a warning and rewrite the value to the literal // "undefined" instead of emitting an error. symbol.ImportItemStatus = js_ast.ImportItemMissing - c.log.AddRangeWarning(&source, r, fmt.Sprintf("Import %q will always be undefined because there is no matching export", namedImport.Alias)) + c.log.AddRangeWarning(&source, r, fmt.Sprintf( + "Import %q will always be undefined because there is no matching export", namedImport.Alias)) } else { - c.addRangeError(source, r, fmt.Sprintf("No matching export for import %q", namedImport.Alias)) + c.log.AddRangeError(&source, r, fmt.Sprintf("No matching export in %q for import %q", + c.graph.Files[nextTracker.sourceIndex].InputFile.Source.PrettyPath, namedImport.Alias)) } case importProbablyTypeScriptType: @@ -2005,17 +2011,22 @@ loop: // statements, trace them all to see if they point to different things. 
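+		// A hypothetical layout that ends up here: "a.js" and "b.js" both
+		// export a name "x", and "shared.js" contains both
+		// "export * from './a.js'" and "export * from './b.js'"; an
+		// "import {x} from './shared.js'" then has two matching exports and
+		// is ambiguous.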
for _, ambiguousTracker := range potentiallyAmbiguousExportStarRefs { // If this is a re-export of another import, follow the import - if _, ok := c.files[ambiguousTracker.sourceIndex].repr.(*reprJS).ast.NamedImports[ambiguousTracker.ref]; ok { - ambiguousResults = append(ambiguousResults, c.matchImportWithExport(importTracker{ - sourceIndex: ambiguousTracker.sourceIndex, - importRef: ambiguousTracker.ref, - })) + if _, ok := c.graph.Files[ambiguousTracker.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NamedImports[ambiguousTracker.Ref]; ok { + // Save and restore the cycle detector to avoid mixing information + oldCycleDetector := c.cycleDetector + ambiguousResult, newReExportFiles := c.matchImportWithExport(importTracker{ + sourceIndex: ambiguousTracker.SourceIndex, + importRef: ambiguousTracker.Ref, + }, reExports) + c.cycleDetector = oldCycleDetector + ambiguousResults = append(ambiguousResults, ambiguousResult) + reExports = newReExportFiles } else { ambiguousResults = append(ambiguousResults, matchImportResult{ kind: matchImportNormal, - sourceIndex: ambiguousTracker.sourceIndex, - ref: ambiguousTracker.ref, - nameLoc: ambiguousTracker.nameLoc, + sourceIndex: ambiguousTracker.SourceIndex, + ref: ambiguousTracker.Ref, + nameLoc: ambiguousTracker.NameLoc, }) } } @@ -2032,9 +2043,18 @@ loop: nameLoc: nextTracker.nameLoc, } + // Depend on the statement(s) that declared this import symbol in the + // original file + for _, resolvedPartIndex := range c.graph.Files[tracker.sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.TopLevelSymbolToParts[tracker.importRef] { + reExports = append(reExports, js_ast.Dependency{ + SourceIndex: tracker.sourceIndex, + PartIndex: resolvedPartIndex, + }) + } + // If this is a re-export of another import, continue for another // iteration of the loop to resolve that import as well - if _, ok := c.files[nextTracker.sourceIndex].repr.(*reprJS).ast.NamedImports[nextTracker.importRef]; ok { + if _, ok := c.graph.Files[nextTracker.sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NamedImports[nextTracker.importRef]; ok { tracker = nextTracker continue } @@ -2058,19 +2078,48 @@ loop: nameLoc: result.nameLoc, otherSourceIndex: ambiguousResult.sourceIndex, otherNameLoc: ambiguousResult.nameLoc, - } + }, nil } - return matchImportResult{kind: matchImportAmbiguous} + return matchImportResult{kind: matchImportAmbiguous}, nil } } return } -func (c *linkerContext) isCommonJSDueToExportStar(sourceIndex uint32, visited map[uint32]bool) bool { - // Terminate the traversal now if this file is CommonJS - repr := c.files[sourceIndex].repr.(*reprJS) - if repr.meta.cjsStyleExports { +func (c *linkerContext) recursivelyWrapDependencies(sourceIndex uint32) { + repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + if repr.Meta.DidWrapDependencies { + return + } + repr.Meta.DidWrapDependencies = true + + // Never wrap the runtime file since it always comes first + if sourceIndex == runtime.SourceIndex { + return + } + + // This module must be wrapped + if repr.Meta.Wrap == graph.WrapNone { + if repr.AST.ExportsKind == js_ast.ExportsCommonJS { + repr.Meta.Wrap = graph.WrapCJS + } else { + repr.Meta.Wrap = graph.WrapESM + } + } + + // All dependencies must also be wrapped + for _, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() { + c.recursivelyWrapDependencies(record.SourceIndex.GetIndex()) + } + } +} + +func (c *linkerContext) hasDynamicExportsDueToExportStar(sourceIndex uint32, visited map[uint32]bool) bool { + // Terminate the traversal now if 
this file already has dynamic exports + repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + if repr.AST.ExportsKind == js_ast.ExportsCommonJS || repr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback { return true } @@ -2081,15 +2130,15 @@ func (c *linkerContext) isCommonJSDueToExportStar(sourceIndex uint32, visited ma visited[sourceIndex] = true // Scan over the export star graph - for _, importRecordIndex := range repr.ast.ExportStarImportRecords { - record := &repr.ast.ImportRecords[importRecordIndex] + for _, importRecordIndex := range repr.AST.ExportStarImportRecords { + record := &repr.AST.ImportRecords[importRecordIndex] - // This file is CommonJS if the exported imports are from a file that is - // either CommonJS directly or transitively by itself having an export star - // from a CommonJS file. - if (record.SourceIndex == nil && (!c.files[sourceIndex].isEntryPoint || !c.options.OutputFormat.KeepES6ImportExportSyntax())) || - (record.SourceIndex != nil && *record.SourceIndex != sourceIndex && c.isCommonJSDueToExportStar(*record.SourceIndex, visited)) { - repr.meta.cjsStyleExports = true + // This file has dynamic exports if the exported imports are from a file + // that either has dynamic exports directly or transitively by itself + // having an export star from a file with dynamic exports. + if (!record.SourceIndex.IsValid() && (!c.graph.Files[sourceIndex].IsEntryPoint() || !c.options.OutputFormat.KeepES6ImportExportSyntax())) || + (record.SourceIndex.IsValid() && record.SourceIndex.GetIndex() != sourceIndex && c.hasDynamicExportsDueToExportStar(record.SourceIndex.GetIndex(), visited)) { + repr.AST.ExportsKind = js_ast.ExportsESMWithDynamicFallback return true } } @@ -2098,7 +2147,7 @@ func (c *linkerContext) isCommonJSDueToExportStar(sourceIndex uint32, visited ma } func (c *linkerContext) addExportsForExportStar( - resolvedExports map[string]exportData, + resolvedExports map[string]graph.ExportData, sourceIndex uint32, sourceIndexStack []uint32, ) { @@ -2109,15 +2158,15 @@ func (c *linkerContext) addExportsForExportStar( } } sourceIndexStack = append(sourceIndexStack, sourceIndex) - repr := c.files[sourceIndex].repr.(*reprJS) + repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) - for _, importRecordIndex := range repr.ast.ExportStarImportRecords { - record := &repr.ast.ImportRecords[importRecordIndex] - if record.SourceIndex == nil { + for _, importRecordIndex := range repr.AST.ExportStarImportRecords { + record := &repr.AST.ImportRecords[importRecordIndex] + if !record.SourceIndex.IsValid() { // This will be resolved at run time instead continue } - otherSourceIndex := *record.SourceIndex + otherSourceIndex := record.SourceIndex.GetIndex() // Export stars from a CommonJS module don't work because they can't be // statically discovered. Just silently ignore them in this case. @@ -2126,15 +2175,15 @@ func (c *linkerContext) addExportsForExportStar( // exports even though it still uses CommonJS features. However, when // doing this we'd also have to rewrite any imports of these export star // re-exports as property accesses off of a generated require() call. 
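+		// For example, "export * from './cjs.js'" contributes nothing to the
+		// static export set being accumulated here; those names can only be
+		// discovered at run time via the generated require() call.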
- otherRepr := c.files[otherSourceIndex].repr.(*reprJS) - if otherRepr.meta.cjsStyleExports { - // This will be resolved at run time instead + otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr) + if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS { + // All exports will be resolved at run time instead continue } // Accumulate this file's exports nextExport: - for alias, name := range otherRepr.ast.NamedExports { + for alias, name := range otherRepr.AST.NamedExports { // ES6 export star statements ignore exports named "default" if alias == "default" { continue @@ -2142,33 +2191,33 @@ func (c *linkerContext) addExportsForExportStar( // This export star is shadowed if any file in the stack has a matching real named export for _, prevSourceIndex := range sourceIndexStack { - prevRepr := c.files[prevSourceIndex].repr.(*reprJS) - if _, ok := prevRepr.ast.NamedExports[alias]; ok { + prevRepr := c.graph.Files[prevSourceIndex].InputFile.Repr.(*graph.JSRepr) + if _, ok := prevRepr.AST.NamedExports[alias]; ok { continue nextExport } } if existing, ok := resolvedExports[alias]; !ok { // Initialize the re-export - resolvedExports[alias] = exportData{ - ref: name.Ref, - sourceIndex: otherSourceIndex, - nameLoc: name.AliasLoc, + resolvedExports[alias] = graph.ExportData{ + Ref: name.Ref, + SourceIndex: otherSourceIndex, + NameLoc: name.AliasLoc, } // Make sure the symbol is marked as imported so that code splitting // imports it correctly if it ends up being shared with another chunk - repr.meta.importsToBind[name.Ref] = importToBind{ - ref: name.Ref, - sourceIndex: otherSourceIndex, + repr.Meta.ImportsToBind[name.Ref] = graph.ImportData{ + Ref: name.Ref, + SourceIndex: otherSourceIndex, } - } else if existing.sourceIndex != otherSourceIndex { + } else if existing.SourceIndex != otherSourceIndex { // Two different re-exports colliding makes it potentially ambiguous - existing.potentiallyAmbiguousExportStarRefs = - append(existing.potentiallyAmbiguousExportStarRefs, importToBind{ - sourceIndex: otherSourceIndex, - ref: name.Ref, - nameLoc: name.AliasLoc, + existing.PotentiallyAmbiguousExportStarRefs = + append(existing.PotentiallyAmbiguousExportStarRefs, graph.ImportData{ + SourceIndex: otherSourceIndex, + Ref: name.Ref, + NameLoc: name.AliasLoc, }) resolvedExports[alias] = existing } @@ -2197,6 +2246,9 @@ const ( // The imported file is CommonJS and has unknown exports importCommonJS + // The import is missing but there is a dynamic fallback object + importDynamicFallback + // The import was treated as a CommonJS import but the file is known to have no exports importCommonJSWithoutExports @@ -2211,230 +2263,179 @@ const ( importProbablyTypeScriptType ) -func (c *linkerContext) advanceImportTracker(tracker importTracker) (importTracker, importStatus, []importToBind) { - file := &c.files[tracker.sourceIndex] - repr := file.repr.(*reprJS) - namedImport := repr.ast.NamedImports[tracker.importRef] +func (c *linkerContext) advanceImportTracker(tracker importTracker) (importTracker, importStatus, []graph.ImportData) { + file := &c.graph.Files[tracker.sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + namedImport := repr.AST.NamedImports[tracker.importRef] // Is this an external file? - record := &repr.ast.ImportRecords[namedImport.ImportRecordIndex] - if record.SourceIndex == nil { + record := &repr.AST.ImportRecords[namedImport.ImportRecordIndex] + if !record.SourceIndex.IsValid() { return importTracker{}, importExternal, nil } // Is this a disabled file? 
- otherSourceIndex := *record.SourceIndex - if c.files[otherSourceIndex].source.KeyPath.IsDisabled() { + otherSourceIndex := record.SourceIndex.GetIndex() + if c.graph.Files[otherSourceIndex].InputFile.Source.KeyPath.IsDisabled() { return importTracker{sourceIndex: otherSourceIndex, importRef: js_ast.InvalidRef}, importDisabled, nil } // Is this a named import of a file without any exports? - otherRepr := c.files[otherSourceIndex].repr.(*reprJS) - if namedImport.Alias != "*" && !otherRepr.ast.UsesCommonJSExports() && !otherRepr.ast.HasES6ImportsOrExports() && !otherRepr.ast.HasLazyExport { + otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr) + if !namedImport.AliasIsStar && !otherRepr.AST.HasLazyExport && + // CommonJS exports + otherRepr.AST.ExportKeyword.Len == 0 && namedImport.Alias != "default" && + // ESM exports + !otherRepr.AST.UsesExportsRef && !otherRepr.AST.UsesModuleRef { // Just warn about it and replace the import with "undefined" return importTracker{sourceIndex: otherSourceIndex, importRef: js_ast.InvalidRef}, importCommonJSWithoutExports, nil } // Is this a CommonJS file? - if otherRepr.meta.cjsStyleExports { + if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS { return importTracker{sourceIndex: otherSourceIndex, importRef: js_ast.InvalidRef}, importCommonJS, nil } - // Match this import up with an export from the imported file - if matchingExport, ok := otherRepr.meta.resolvedExports[namedImport.Alias]; ok { + // Match this import star with an export star from the imported file + if matchingExport := otherRepr.Meta.ResolvedExportStar; namedImport.AliasIsStar && matchingExport != nil { // Check to see if this is a re-export of another import return importTracker{ - sourceIndex: matchingExport.sourceIndex, - importRef: matchingExport.ref, - nameLoc: matchingExport.nameLoc, - }, importFound, matchingExport.potentiallyAmbiguousExportStarRefs + sourceIndex: matchingExport.SourceIndex, + importRef: matchingExport.Ref, + nameLoc: matchingExport.NameLoc, + }, importFound, matchingExport.PotentiallyAmbiguousExportStarRefs + } + + // Match this import up with an export from the imported file + if matchingExport, ok := otherRepr.Meta.ResolvedExports[namedImport.Alias]; ok { + // Check to see if this is a re-export of another import + return importTracker{ + sourceIndex: matchingExport.SourceIndex, + importRef: matchingExport.Ref, + nameLoc: matchingExport.NameLoc, + }, importFound, matchingExport.PotentiallyAmbiguousExportStarRefs + } + + // Is this a file with dynamic exports? 
+ if otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback { + return importTracker{sourceIndex: otherSourceIndex, importRef: otherRepr.AST.ExportsRef}, importDynamicFallback, nil } // Missing re-exports in TypeScript files are indistinguishable from types - if file.loader.IsTypeScript() && namedImport.IsExported { + if file.InputFile.Loader.IsTypeScript() && namedImport.IsExported { return importTracker{}, importProbablyTypeScriptType, nil } - return importTracker{}, importNoMatch, nil + return importTracker{sourceIndex: otherSourceIndex}, importNoMatch, nil } -func (c *linkerContext) markPartsReachableFromEntryPoints() { - // Allocate bit sets - bitCount := uint(len(c.entryPoints)) - for _, sourceIndex := range c.reachableFiles { - file := &c.files[sourceIndex] - file.entryBits = newBitSet(bitCount) - - switch repr := file.repr.(type) { - case *reprJS: - for partIndex := range repr.meta.partMeta { - partMeta := &repr.meta.partMeta[partIndex] - partMeta.entryBits = newBitSet(bitCount) - partMeta.prevSibling = uint32(partIndex) - partMeta.nextSibling = uint32(partIndex) - } - - // If this is a CommonJS file, we're going to need to generate a wrapper - // for the CommonJS closure. That will end up looking something like this: - // - // var require_foo = __commonJS((exports, module) => { - // ... - // }); - // - // However, that generation is special-cased for various reasons and is - // done later on. Still, we're going to need to ensure that this file - // both depends on the "__commonJS" symbol and declares the "require_foo" - // symbol. Instead of special-casing this during the reachablity analysis - // below, we just append a dummy part to the end of the file with these - // dependencies and let the general-purpose reachablity analysis take care - // of it. - if repr.meta.cjsWrap { - runtimeRepr := c.files[runtime.SourceIndex].repr.(*reprJS) - commonJSRef := runtimeRepr.ast.NamedExports["__commonJS"].Ref - commonJSParts := runtimeRepr.ast.TopLevelSymbolToParts[commonJSRef] - - // Generate the dummy part - nonLocalDependencies := make([]partRef, len(commonJSParts)) - for i, partIndex := range commonJSParts { - nonLocalDependencies[i] = partRef{sourceIndex: runtime.SourceIndex, partIndex: partIndex} - } - partIndex := c.addPartToFile(sourceIndex, js_ast.Part{ - SymbolUses: map[js_ast.Ref]js_ast.SymbolUse{ - repr.ast.WrapperRef: {CountEstimate: 1}, - commonJSRef: {CountEstimate: 1}, - }, - DeclaredSymbols: []js_ast.DeclaredSymbol{ - {Ref: repr.ast.ExportsRef, IsTopLevel: true}, - {Ref: repr.ast.ModuleRef, IsTopLevel: true}, - {Ref: repr.ast.WrapperRef, IsTopLevel: true}, - }, - }, partMeta{ - nonLocalDependencies: nonLocalDependencies, - }) - repr.meta.cjsWrapperPartIndex = &partIndex - repr.ast.TopLevelSymbolToParts[repr.ast.WrapperRef] = []uint32{partIndex} - repr.meta.importsToBind[commonJSRef] = importToBind{ - ref: commonJSRef, - sourceIndex: runtime.SourceIndex, - } - } - } +func (c *linkerContext) treeShakingAndCodeSplitting() { + // Tree shaking: Each entry point marks all files reachable from itself + for _, entryPoint := range c.graph.EntryPoints() { + c.markFileLiveForTreeShaking(entryPoint.SourceIndex) } - // Each entry point marks all files reachable from itself - for i, entryPoint := range c.entryPoints { - c.includeFile(entryPoint, uint(i), 0) + // Code splitting: Determine which entry points can reach which files. This + // has to happen after tree shaking because there is an implicit dependency + // between live parts within the same file. 
All liveness has to be computed + // first before determining which entry points can reach which files. + for i, entryPoint := range c.graph.EntryPoints() { + c.markFileReachableForCodeSplitting(entryPoint.SourceIndex, uint(i), 0) } } -// Code splitting may cause an assignment to a local variable to end up in a -// separate chunk from the variable. This is bad because that will generate -// an assignment to an import, which will fail. Make sure these parts end up -// in the same chunk in these cases. -func (c *linkerContext) handleCrossChunkAssignments() { - if len(c.entryPoints) < 2 { - // No need to do this if there cannot be cross-chunk assignments +func (c *linkerContext) markFileReachableForCodeSplitting(sourceIndex uint32, entryPointBit uint, distanceFromEntryPoint uint32) { + file := &c.graph.Files[sourceIndex] + if !file.IsLive { return } - neverReachedEntryBits := newBitSet(uint(len(c.entryPoints))) - - for _, sourceIndex := range c.reachableFiles { - file := &c.files[sourceIndex] - repr, ok := file.repr.(*reprJS) - if !ok { - continue - } - - for partIndex, part := range repr.ast.Parts { - // Ignore this part if it's dead code - if repr.meta.partMeta[partIndex].entryBits.equals(neverReachedEntryBits) { - continue - } - - // If this part assigns to a local variable, make sure the parts for the - // variable's declaration are in the same chunk as this part - for ref, use := range part.SymbolUses { - if use.IsAssigned { - if otherParts, ok := repr.ast.TopLevelSymbolToParts[ref]; ok { - for _, otherPartIndex := range otherParts { - partMetaA := &repr.meta.partMeta[partIndex] - partMetaB := &repr.meta.partMeta[otherPartIndex] - - // Make sure both sibling subsets have the same entry points - for entryPointBit := range c.entryPoints { - hasA := partMetaA.entryBits.hasBit(uint(entryPointBit)) - hasB := partMetaB.entryBits.hasBit(uint(entryPointBit)) - if hasA && !hasB { - c.includePart(sourceIndex, otherPartIndex, uint(entryPointBit), file.distanceFromEntryPoint) - } else if hasB && !hasA { - c.includePart(sourceIndex, uint32(partIndex), uint(entryPointBit), file.distanceFromEntryPoint) - } - } - - // Perform the merge - repr.meta.partMeta[partMetaA.nextSibling].prevSibling = partMetaB.prevSibling - repr.meta.partMeta[partMetaB.prevSibling].nextSibling = partMetaA.nextSibling - partMetaA.nextSibling = otherPartIndex - partMetaB.prevSibling = uint32(partIndex) - } - } - } - } - } - } -} - -func (c *linkerContext) includeFile(sourceIndex uint32, entryPointBit uint, distanceFromEntryPoint uint32) { - file := &c.files[sourceIndex] + traverseAgain := false // Track the minimum distance to an entry point - if distanceFromEntryPoint < file.distanceFromEntryPoint { - file.distanceFromEntryPoint = distanceFromEntryPoint + if distanceFromEntryPoint < file.DistanceFromEntryPoint { + file.DistanceFromEntryPoint = distanceFromEntryPoint + traverseAgain = true } distanceFromEntryPoint++ // Don't mark this file more than once - if file.entryBits.hasBit(entryPointBit) { + if file.EntryBits.HasBit(entryPointBit) && !traverseAgain { return } - file.entryBits.setBit(entryPointBit) + file.EntryBits.SetBit(entryPointBit) - switch repr := file.repr.(type) { - case *reprJS: + switch repr := file.InputFile.Repr.(type) { + case *graph.JSRepr: + // If the JavaScript stub for a CSS file is included, also include the CSS file + if repr.CSSSourceIndex.IsValid() { + c.markFileReachableForCodeSplitting(repr.CSSSourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint) + } + + // Traverse into all imported files + 
for _, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() && !c.isExternalDynamicImport(&record, sourceIndex) { + c.markFileReachableForCodeSplitting(record.SourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint) + } + } + + // Traverse into all dependencies of all parts in this file + for _, part := range repr.AST.Parts { + for _, dependency := range part.Dependencies { + if dependency.SourceIndex != sourceIndex { + c.markFileReachableForCodeSplitting(dependency.SourceIndex, entryPointBit, distanceFromEntryPoint) + } + } + } + + case *graph.CSSRepr: + // Traverse into all dependencies + for _, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() { + c.markFileReachableForCodeSplitting(record.SourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint) + } + } + } +} + +func (c *linkerContext) markFileLiveForTreeShaking(sourceIndex uint32) { + file := &c.graph.Files[sourceIndex] + + // Don't mark this file more than once + if file.IsLive { + return + } + file.IsLive = true + + switch repr := file.InputFile.Repr.(type) { + case *graph.JSRepr: isTreeShakingEnabled := config.IsTreeShakingEnabled(c.options.Mode, c.options.OutputFormat) // If the JavaScript stub for a CSS file is included, also include the CSS file - if repr.cssSourceIndex != nil { - c.includeFile(*repr.cssSourceIndex, entryPointBit, distanceFromEntryPoint) + if repr.CSSSourceIndex.IsValid() { + c.markFileLiveForTreeShaking(repr.CSSSourceIndex.GetIndex()) } - for partIndex, part := range repr.ast.Parts { + for partIndex, part := range repr.AST.Parts { canBeRemovedIfUnused := part.CanBeRemovedIfUnused - // Don't include the entry point part if we're not the entry point - if repr.meta.entryPointExportPartIndex != nil && uint32(partIndex) == *repr.meta.entryPointExportPartIndex && - sourceIndex != c.entryPoints[entryPointBit] { - continue - } - // Also include any statement-level imports for _, importRecordIndex := range part.ImportRecordIndices { - record := &repr.ast.ImportRecords[importRecordIndex] + record := &repr.AST.ImportRecords[importRecordIndex] if record.Kind != ast.ImportStmt { continue } - if record.SourceIndex != nil { - otherSourceIndex := *record.SourceIndex + if record.SourceIndex.IsValid() { + otherSourceIndex := record.SourceIndex.GetIndex() // Don't include this module for its side effects if it can be // considered to have no side effects - if otherFile := &c.files[otherSourceIndex]; otherFile.ignoreIfUnused && !c.options.IgnoreDCEAnnotations { + if otherFile := &c.graph.Files[otherSourceIndex]; otherFile.InputFile.SideEffects.Kind != graph.HasSideEffects && !c.options.IgnoreDCEAnnotations { continue } // Otherwise, include this module for its side effects - c.includeFile(otherSourceIndex, entryPointBit, distanceFromEntryPoint) + c.markFileLiveForTreeShaking(otherSourceIndex) } // If we get here then the import was included for its side effects, so @@ -2445,185 +2446,55 @@ func (c *linkerContext) includeFile(sourceIndex uint32, entryPointBit uint, dist // Include all parts in this file with side effects, or just include // everything if tree-shaking is disabled. Note that we still want to // perform tree-shaking on the runtime even if tree-shaking is disabled. 
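A minimal standalone sketch of the two-pass scheme in the hunks above, under simplified assumptions (at most 64 entry points, a toy graph type rather than esbuild's): files are first marked live for tree shaking, then each live file records which entry points can reach it in a bit set.

package main

import "fmt"

type file struct {
	imports   []int  // indices of imported files
	isLive    bool   // set by the tree shaking pass
	entryBits uint64 // one bit per entry point (assumes <= 64 entry points)
}

func markLive(files []file, i int) {
	if files[i].isLive {
		return
	}
	files[i].isLive = true
	for _, dep := range files[i].imports {
		markLive(files, dep)
	}
}

func markReachable(files []file, i int, entryBit uint) {
	f := &files[i]
	if !f.isLive || f.entryBits&(1<<entryBit) != 0 {
		return
	}
	f.entryBits |= 1 << entryBit
	for _, dep := range f.imports {
		markReachable(files, dep, entryBit)
	}
}

func main() {
	// Files 0 and 1 are entry points; both import 2, only 1 imports 3, 4 is dead.
	files := []file{{imports: []int{2}}, {imports: []int{2, 3}}, {}, {}, {}}
	entryPoints := []int{0, 1}
	for _, entry := range entryPoints {
		markLive(files, entry)
	}
	for bit, entry := range entryPoints {
		markReachable(files, entry, uint(bit))
	}
	for i, f := range files {
		fmt.Printf("file %d: live=%v entryBits=%02b\n", i, f.isLive, f.entryBits)
	}
}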
- if !canBeRemovedIfUnused || (!part.ForceTreeShaking && !isTreeShakingEnabled && file.isEntryPoint) { - c.includePart(sourceIndex, uint32(partIndex), entryPointBit, distanceFromEntryPoint) + if !canBeRemovedIfUnused || (!part.ForceTreeShaking && !isTreeShakingEnabled && file.IsEntryPoint()) { + c.markPartLiveForTreeShaking(sourceIndex, uint32(partIndex)) } } - // If this is an entry point, include all exports - if file.isEntryPoint { - for _, alias := range repr.meta.sortedAndFilteredExportAliases { - export := repr.meta.resolvedExports[alias] - targetSourceIndex := export.sourceIndex - targetRef := export.ref - - // If this is an import, then target what the import points to - targetRepr := c.files[targetSourceIndex].repr.(*reprJS) - if importToBind, ok := targetRepr.meta.importsToBind[targetRef]; ok { - targetSourceIndex = importToBind.sourceIndex - targetRef = importToBind.ref - } - - // Pull in all declarations of this symbol - for _, partIndex := range targetRepr.ast.TopLevelSymbolToParts[targetRef] { - c.includePart(targetSourceIndex, partIndex, entryPointBit, distanceFromEntryPoint) - } - } - - // Ensure "exports" is included if the current output format needs it - if repr.meta.forceIncludeExportsForEntryPoint { - c.includePart(sourceIndex, repr.meta.nsExportPartIndex, entryPointBit, distanceFromEntryPoint) - } - } - - case *reprCSS: + case *graph.CSSRepr: // Include all "@import" rules - for _, record := range repr.ast.ImportRecords { - if record.SourceIndex != nil { - c.includeFile(*record.SourceIndex, entryPointBit, distanceFromEntryPoint) + for _, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() { + c.markFileLiveForTreeShaking(record.SourceIndex.GetIndex()) } } } } -func (c *linkerContext) includePartsForRuntimeSymbol( - part *js_ast.Part, fileMeta *fileMeta, useCount uint32, - name string, entryPointBit uint, distanceFromEntryPoint uint32, -) { - if useCount > 0 { - runtimeRepr := c.files[runtime.SourceIndex].repr.(*reprJS) - ref := runtimeRepr.ast.NamedExports[name].Ref - - // Depend on the symbol from the runtime - c.generateUseOfSymbolForInclude(part, fileMeta, useCount, ref, runtime.SourceIndex) - - // Since this part was included, also include the parts from the runtime - // that declare this symbol - for _, partIndex := range runtimeRepr.ast.TopLevelSymbolToParts[ref] { - c.includePart(runtime.SourceIndex, partIndex, entryPointBit, distanceFromEntryPoint) - } - } +func (c *linkerContext) isExternalDynamicImport(record *ast.ImportRecord, sourceIndex uint32) bool { + return record.Kind == ast.ImportDynamic && c.graph.Files[record.SourceIndex.GetIndex()].IsEntryPoint() && record.SourceIndex.GetIndex() != sourceIndex } -func (c *linkerContext) generateUseOfSymbolForInclude( - part *js_ast.Part, fileMeta *fileMeta, useCount uint32, - ref js_ast.Ref, otherSourceIndex uint32, -) { - use := part.SymbolUses[ref] - use.CountEstimate += useCount - part.SymbolUses[ref] = use - fileMeta.importsToBind[ref] = importToBind{ - sourceIndex: otherSourceIndex, - ref: ref, - } -} - -func (c *linkerContext) isExternalDynamicImport(record *ast.ImportRecord) bool { - return record.Kind == ast.ImportDynamic && c.files[*record.SourceIndex].isEntryPoint -} - -func (c *linkerContext) includePart(sourceIndex uint32, partIndex uint32, entryPointBit uint, distanceFromEntryPoint uint32) { - file := &c.files[sourceIndex] - repr := file.repr.(*reprJS) - partMeta := &repr.meta.partMeta[partIndex] +func (c *linkerContext) markPartLiveForTreeShaking(sourceIndex uint32, partIndex 
uint32) { + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + part := &repr.AST.Parts[partIndex] // Don't mark this part more than once - if partMeta.entryBits.hasBit(entryPointBit) { + if part.IsLive { return } - partMeta.entryBits.setBit(entryPointBit) - - part := &repr.ast.Parts[partIndex] + part.IsLive = true // Include the file containing this part - c.includeFile(sourceIndex, entryPointBit, distanceFromEntryPoint) + c.markFileLiveForTreeShaking(sourceIndex) - // Also include any local dependencies - for otherPartIndex := range part.LocalDependencies { - c.includePart(sourceIndex, otherPartIndex, entryPointBit, distanceFromEntryPoint) + // Also include any dependencies + for _, dep := range part.Dependencies { + c.markPartLiveForTreeShaking(dep.SourceIndex, dep.PartIndex) } - - // Also include any non-local dependencies - for _, nonLocalDependency := range partMeta.nonLocalDependencies { - c.includePart(nonLocalDependency.sourceIndex, nonLocalDependency.partIndex, entryPointBit, distanceFromEntryPoint) - } - - // Also include any cross-chunk assignment siblings - for i := partMeta.nextSibling; i != partIndex; i = repr.meta.partMeta[i].nextSibling { - c.includePart(sourceIndex, i, entryPointBit, distanceFromEntryPoint) - } - - // Also include any require() imports - toModuleUses := uint32(0) - for _, importRecordIndex := range part.ImportRecordIndices { - record := &repr.ast.ImportRecords[importRecordIndex] - - // Don't follow external imports (this includes import() expressions) - if record.SourceIndex == nil || c.isExternalDynamicImport(record) { - // This is an external import, so it needs the "__toModule" wrapper as - // long as it's not a bare "require()" - if record.Kind != ast.ImportRequire && !c.options.OutputFormat.KeepES6ImportExportSyntax() { - record.WrapWithToModule = true - toModuleUses++ - } - continue - } - - otherSourceIndex := *record.SourceIndex - otherRepr := c.files[otherSourceIndex].repr.(*reprJS) - if record.Kind == ast.ImportStmt && !otherRepr.meta.cjsStyleExports { - // Skip this since it's not a require() import - continue - } - - // This is a require() import - c.includeFile(otherSourceIndex, entryPointBit, distanceFromEntryPoint) - - // Depend on the automatically-generated require wrapper symbol - wrapperRef := otherRepr.ast.WrapperRef - c.generateUseOfSymbolForInclude(part, &repr.meta, 1, wrapperRef, otherSourceIndex) - - // This is an ES6 import of a CommonJS module, so it needs the - // "__toModule" wrapper as long as it's not a bare "require()" - if record.Kind != ast.ImportRequire { - record.WrapWithToModule = true - toModuleUses++ - } - } - - // If there's an ES6 import of a non-ES6 module, then we're going to need the - // "__toModule" symbol from the runtime to wrap the result of "require()" - c.includePartsForRuntimeSymbol(part, &repr.meta, toModuleUses, "__toModule", entryPointBit, distanceFromEntryPoint) - - // If there's an ES6 export star statement of a non-ES6 module, then we're - // going to need the "__exportStar" symbol from the runtime - exportStarUses := uint32(0) - for _, importRecordIndex := range repr.ast.ExportStarImportRecords { - record := &repr.ast.ImportRecords[importRecordIndex] - - // Is this export star evaluated at run time? 
- if (record.SourceIndex == nil && (!file.isEntryPoint || !c.options.OutputFormat.KeepES6ImportExportSyntax())) || - (record.SourceIndex != nil && *record.SourceIndex != sourceIndex && c.files[*record.SourceIndex].repr.(*reprJS).meta.cjsStyleExports) { - record.CallsRunTimeExportStarFn = true - repr.ast.UsesExportsRef = true - exportStarUses++ - } - } - c.includePartsForRuntimeSymbol(part, &repr.meta, exportStarUses, "__exportStar", entryPointBit, distanceFromEntryPoint) } -func baseFileNameForVirtualModulePath(path string) string { - _, base, ext := logger.PlatformIndependentPathDirBaseExt(path) - - // Convert it to a safe file name. See: https://stackoverflow.com/a/31976060 +func sanitizeFilePathForVirtualModulePath(path string) string { + // Convert it to a safe file path. See: https://stackoverflow.com/a/31976060 sb := strings.Builder{} needsGap := false - for _, c := range base + ext { + for _, c := range path { switch c { - case 0, '/': + case 0: // These characters are forbidden on Unix and Windows - case '<', '>', ':', '"', '\\', '|', '?', '*': + case '<', '>', ':', '"', '|', '?', '*': // These characters are forbidden on Windows default: @@ -2658,144 +2529,196 @@ func baseFileNameForVirtualModulePath(path string) string { } func (c *linkerContext) computeChunks() []chunkInfo { - chunks := make(map[string]chunkInfo) - neverReachedKey := string(newBitSet(uint(len(c.entryPoints))).entries) + jsChunks := make(map[string]chunkInfo) + cssChunks := make(map[string]chunkInfo) - // Compute entry point names - for i, entryPoint := range c.entryPoints { - var relDir string - var baseName string - var repr chunkRepr - file := &c.files[entryPoint] - - switch file.repr.(type) { - case *reprJS: - repr = &chunkReprJS{} - case *reprCSS: - repr = &chunkReprCSS{} - } - - if c.options.AbsOutputFile != "" { - baseName = c.fs.Base(c.options.AbsOutputFile) - } else { - source := file.source - if source.KeyPath.Namespace != "file" { - baseName = baseFileNameForVirtualModulePath(source.KeyPath.Text) - } else if relPath, ok := c.fs.Rel(c.options.AbsOutputBase, source.KeyPath.Text); ok { - relDir = c.fs.Dir(relPath) - baseName = c.fs.Base(relPath) - relDir = strings.ReplaceAll(relDir, "\\", "/") - - // Replace leading "../" so we don't try to write outside of the output - // directory. This normally can't happen because "AbsOutputBase" is - // automatically computed to contain all entry point files, but it can - // happen if someone sets it manually via the "outbase" API option. - // - // Note that we can't just strip any leading "../" because that could - // cause two separate entry point paths to collide. For example, there - // could be both "src/index.js" and "../src/index.js" as entry points. - dotDotCount := 0 - for strings.HasPrefix(relDir[dotDotCount*3:], "../") { - dotDotCount++ - } - if dotDotCount > 0 { - // The use of "_.._" here is somewhat arbitrary but it is unlikely to - // collide with a folder named by a human and it works on Windows - // (Windows doesn't like names that end with a "."). And not starting - // with a "." means that it will not be hidden on Unix. 
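An illustrative extraction of the "_.._" rewrite described in the comment above; the helper name and the standalone packaging are mine, not the patch's. Each leading "../" segment is replaced so the output path stays inside the out directory while distinct inputs still map to distinct outputs.

package main

import (
	"fmt"
	"strings"
)

func rewriteDotDot(relDir string) string {
	n := 0
	for strings.HasPrefix(relDir[n*3:], "../") {
		n++
	}
	return strings.Repeat("_.._/", n) + relDir[n*3:]
}

func main() {
	fmt.Println(rewriteDotDot("../../src")) // _.._/_.._/src
	fmt.Println(rewriteDotDot("src"))       // src
}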
- relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:] - } - } else { - baseName = c.fs.Base(source.KeyPath.Text) - } - - // Swap the extension for the standard one - ext := c.fs.Ext(baseName) - baseName = baseName[:len(baseName)-len(ext)] - switch repr.(type) { - case *chunkReprJS: - baseName += c.options.OutputExtensionJS - case *chunkReprCSS: - baseName += c.options.OutputExtensionCSS - } - } - - // Always use cross-platform path separators to avoid problems with Windows - file.entryPointRelPath = path.Join(relDir, baseName) + // Create chunks for entry points + for i, entryPoint := range c.graph.EntryPoints() { + file := &c.graph.Files[entryPoint.SourceIndex] // Create a chunk for the entry point here to ensure that the chunk is // always generated even if the resulting file is empty - entryBits := newBitSet(uint(len(c.entryPoints))) - entryBits.setBit(uint(i)) - chunks[string(entryBits.entries)] = chunkInfo{ + entryBits := helpers.NewBitSet(uint(len(c.graph.EntryPoints()))) + entryBits.SetBit(uint(i)) + info := chunkInfo{ entryBits: entryBits, isEntryPoint: true, - sourceIndex: entryPoint, + sourceIndex: entryPoint.SourceIndex, entryPointBit: uint(i), - relDir: relDir, - baseNameOrEmpty: baseName, filesWithPartsInChunk: make(map[uint32]bool), - repr: repr, + } + + switch file.InputFile.Repr.(type) { + case *graph.JSRepr: + info.chunkRepr = &chunkReprJS{} + jsChunks[entryBits.String()] = info + + case *graph.CSSRepr: + info.chunkRepr = &chunkReprCSS{} + cssChunks[entryBits.String()] = info } } // Figure out which files are in which chunk - for _, sourceIndex := range c.reachableFiles { - file := &c.files[sourceIndex] - switch repr := file.repr.(type) { - case *reprJS: - for _, partMeta := range repr.meta.partMeta { - key := string(partMeta.entryBits.entries) - if key == neverReachedKey { - // Ignore this part if it was never reached - continue - } - chunk, ok := chunks[key] - if !ok { - chunk.entryBits = partMeta.entryBits - chunk.filesWithPartsInChunk = make(map[uint32]bool) - chunk.repr = &chunkReprJS{} - chunks[key] = chunk - } - chunk.filesWithPartsInChunk[uint32(sourceIndex)] = true - } - - case *reprCSS: - key := string(file.entryBits.entries) - if key == neverReachedKey { - // Ignore this file if it was never reached - continue - } - chunk, ok := chunks[key] - if !ok { - chunk.entryBits = file.entryBits - chunk.filesWithPartsInChunk = make(map[uint32]bool) - chunk.repr = &chunkReprJS{} - chunks[key] = chunk - } - chunk.filesWithPartsInChunk[uint32(sourceIndex)] = true + for _, sourceIndex := range c.graph.ReachableFiles { + file := &c.graph.Files[sourceIndex] + if !file.IsLive { + // Ignore this file if it's not included in the bundle + continue } + key := file.EntryBits.String() + var chunk chunkInfo + var ok bool + switch file.InputFile.Repr.(type) { + case *graph.JSRepr: + chunk, ok = jsChunks[key] + if !ok { + chunk.entryBits = file.EntryBits + chunk.filesWithPartsInChunk = make(map[uint32]bool) + chunk.chunkRepr = &chunkReprJS{} + jsChunks[key] = chunk + } + case *graph.CSSRepr: + chunk, ok = cssChunks[key] + if !ok { + chunk.entryBits = file.EntryBits + chunk.filesWithPartsInChunk = make(map[uint32]bool) + chunk.chunkRepr = &chunkReprCSS{} + + // Check whether this is the CSS file to go with a JS entry point + if jsChunk, ok := jsChunks[key]; ok && jsChunk.isEntryPoint { + chunk.isEntryPoint = true + chunk.sourceIndex = jsChunk.sourceIndex + chunk.entryPointBit = jsChunk.entryPointBit + } + + cssChunks[key] = chunk + } + } + 
chunk.filesWithPartsInChunk[uint32(sourceIndex)] = true } - // Sort the chunks for determinism. This mostly doesn't matter because each - // chunk is a separate file, but it matters for error messages in tests since - // tests stop on the first output mismatch. - sortedKeys := make([]string, 0, len(chunks)) - for key := range chunks { + // Sort the chunks for determinism. This matters because we use chunk indices + // as sorting keys in a few places. + sortedChunks := make([]chunkInfo, 0, len(jsChunks)+len(cssChunks)) + sortedKeys := make([]string, 0, len(jsChunks)+len(cssChunks)) + for key := range jsChunks { sortedKeys = append(sortedKeys, key) } sort.Strings(sortedKeys) - sortedChunks := make([]chunkInfo, len(chunks)) - for i, key := range sortedKeys { - sortedChunks[i] = chunks[key] + for _, key := range sortedKeys { + sortedChunks = append(sortedChunks, jsChunks[key]) } + sortedKeys = sortedKeys[:0] + for key := range cssChunks { + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + for _, key := range sortedKeys { + sortedChunks = append(sortedChunks, cssChunks[key]) + } + + // Map from the entry point file to this chunk. We will need this later if + // a file contains a dynamic import to this entry point, since we'll need + // to look up the path for this chunk to use with the import. + for chunkIndex, chunk := range sortedChunks { + if chunk.isEntryPoint { + file := &c.graph.Files[chunk.sourceIndex] + + // JS entry points that import CSS files generate two chunks, a JS chunk + // and a CSS chunk. Don't link the CSS chunk to the JS file since the CSS + // chunk is secondary (the JS chunk is primary). + if _, ok := chunk.chunkRepr.(*chunkReprCSS); ok { + if _, ok := file.InputFile.Repr.(*graph.JSRepr); ok { + continue + } + } + + file.EntryPointChunkIndex = uint32(chunkIndex) + } + } + + // Determine the order of files within the chunk ahead of time. This may + // generate additional CSS chunks from JS chunks that import CSS files. + { + for chunkIndex, chunk := range sortedChunks { + js, jsParts, css := c.chunkFileOrder(&chunk) + + switch chunk.chunkRepr.(type) { + case *chunkReprJS: + sortedChunks[chunkIndex].filesInChunkInOrder = js + sortedChunks[chunkIndex].partsInChunkInOrder = jsParts + + // If JS files include CSS files, make a sibling chunk for the CSS + if len(css) > 0 { + sortedChunks = append(sortedChunks, chunkInfo{ + filesInChunkInOrder: css, + entryBits: chunk.entryBits, + isEntryPoint: chunk.isEntryPoint, + sourceIndex: chunk.sourceIndex, + entryPointBit: chunk.entryPointBit, + filesWithPartsInChunk: make(map[uint32]bool), + chunkRepr: &chunkReprCSS{}, + }) + } + + case *chunkReprCSS: + sortedChunks[chunkIndex].filesInChunkInOrder = css + } + } + } + + // Assign general information to each chunk + for chunkIndex := range sortedChunks { + chunk := &sortedChunks[chunkIndex] + + // Assign a unique key to each chunk. This key encodes the index directly so + // we can easily recover it later without needing to look it up in a map. The + // last 8 numbers of the key are the chunk index. 
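A small sketch of the unique-key scheme this comment describes: an opaque prefix followed by a zero-padded 8-digit chunk index, so the index can be parsed back out of a generated path without a map lookup. The prefix value here is made up; the real one is generated at runtime.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	const uniqueKeyPrefix = "K7f3" // hypothetical stand-in for c.uniqueKeyPrefix
	key := fmt.Sprintf("%s%08d", uniqueKeyPrefix, 42)
	fmt.Println(key) // K7f300000042

	// Recover the chunk index from the last 8 digits of the key.
	index, err := strconv.Atoi(key[len(key)-8:])
	if err != nil {
		panic(err)
	}
	fmt.Println(index) // 42
}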
+ chunk.uniqueKey = fmt.Sprintf("%s%08d", c.uniqueKeyPrefix, chunkIndex) + + // Determine the standard file extension + var stdExt string + switch chunk.chunkRepr.(type) { + case *chunkReprJS: + stdExt = c.options.OutputExtensionJS + case *chunkReprCSS: + stdExt = c.options.OutputExtensionCSS + } + + // Compute the template substitutions + var dir, base, ext string + var template []config.PathTemplate + if chunk.isEntryPoint { + if c.graph.Files[chunk.sourceIndex].IsUserSpecifiedEntryPoint() { + dir, base, ext = c.pathRelativeToOutbase(chunk.sourceIndex, chunk.entryPointBit, stdExt, false /* avoidIndex */) + template = c.options.EntryPathTemplate + } else { + dir, base, ext = c.pathRelativeToOutbase(chunk.sourceIndex, chunk.entryPointBit, stdExt, true /* avoidIndex */) + template = c.options.ChunkPathTemplate + } + } else { + dir = "/" + base = "chunk" + ext = stdExt + template = c.options.ChunkPathTemplate + } + + // Determine the output path template + template = append(append(make([]config.PathTemplate, 0, len(template)+1), template...), config.PathTemplate{Data: ext}) + chunk.finalTemplate = config.SubstituteTemplate(template, config.PathPlaceholders{ + Dir: &dir, + Name: &base, + }) + } + return sortedChunks } type chunkOrder struct { sourceIndex uint32 distance uint32 - path logger.Path + tieBreaker uint32 } // This type is just so we can use Go's native sort function @@ -2805,7 +2728,9 @@ func (a chunkOrderArray) Len() int { return len(a) } func (a chunkOrderArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] } func (a chunkOrderArray) Less(i int, j int) bool { - return a[i].distance < a[j].distance || (a[i].distance == a[j].distance && a[i].path.ComesBeforeInSortedOrder(a[j].path)) + ai := a[i] + aj := a[j] + return ai.distance < aj.distance || (ai.distance == aj.distance && ai.tieBreaker < aj.tieBreaker) } func appendOrExtendPartRange(ranges []partRange, sourceIndex uint32, partIndex uint32) []partRange { @@ -2823,14 +2748,14 @@ func appendOrExtendPartRange(ranges []partRange, sourceIndex uint32, partIndex u }) } -func (c *linkerContext) shouldIncludePart(repr *reprJS, part js_ast.Part) bool { +func (c *linkerContext) shouldIncludePart(repr *graph.JSRepr, part js_ast.Part) bool { // As an optimization, ignore parts containing a single import statement to - // an internal non-CommonJS file. These will be ignored anyway and it's a + // an internal non-wrapped file. These will be ignored anyway and it's a // performance hit to spin up a goroutine only to discover this later. 
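The rationale in the shouldIncludePart comment above (don't pay goroutine startup for work that will be discarded) is the usual pre-filter pattern; a generic sketch with invented types:

package main

import (
	"fmt"
	"sync"
)

type part struct{ isTrivialImport bool } // invented stand-in for the real check

func main() {
	parts := []part{{true}, {false}, {false}, {true}}
	results := make([]string, len(parts))
	var wg sync.WaitGroup
	for i, p := range parts {
		if p.isTrivialImport {
			continue // rejected synchronously; no goroutine is ever started
		}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			results[i] = fmt.Sprintf("compiled part %d", i)
		}(i)
	}
	wg.Wait()
	fmt.Println(results)
}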
if len(part.Stmts) == 1 { if s, ok := part.Stmts[0].Data.(*js_ast.SImport); ok { - record := &repr.ast.ImportRecords[s.ImportRecordIndex] - if record.SourceIndex != nil && !c.files[*record.SourceIndex].repr.(*reprJS).meta.cjsStyleExports { + record := &repr.AST.ImportRecords[s.ImportRecordIndex] + if record.SourceIndex.IsValid() && c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr).Meta.Wrap == graph.WrapNone { return false } } @@ -2843,17 +2768,17 @@ func (c *linkerContext) chunkFileOrder(chunk *chunkInfo) (js []uint32, jsParts [ // Attach information to the files for use with sorting for sourceIndex := range chunk.filesWithPartsInChunk { - file := &c.files[sourceIndex] + file := &c.graph.Files[sourceIndex] sorted = append(sorted, chunkOrder{ sourceIndex: sourceIndex, - distance: file.distanceFromEntryPoint, - path: file.source.KeyPath, + distance: file.DistanceFromEntryPoint, + tieBreaker: c.graph.StableSourceIndices[sourceIndex], }) } // Sort so files closest to an entry point come first. If two files are // equidistant to an entry point, then break the tie by sorting on the - // absolute path. + // stable source index derived from the DFS over all entry points. sort.Sort(sorted) visited := make(map[uint32]bool) @@ -2868,39 +2793,39 @@ func (c *linkerContext) chunkFileOrder(chunk *chunkInfo) (js []uint32, jsParts [ } visited[sourceIndex] = true - file := &c.files[sourceIndex] - isFileInThisChunk := chunk.entryBits.equals(file.entryBits) + file := &c.graph.Files[sourceIndex] + isFileInThisChunk := chunk.entryBits.Equals(file.EntryBits) - switch repr := file.repr.(type) { - case *reprJS: - // CommonJS files can't be split because they are all inside the wrapper - canFileBeSplit := !repr.meta.cjsWrap + switch repr := file.InputFile.Repr.(type) { + case *graph.JSRepr: + // Wrapped files can't be split because they are all inside the wrapper + canFileBeSplit := repr.Meta.Wrap == graph.WrapNone // Make sure the generated call to "__export(exports, ...)" comes first // before anything else in this file - if canFileBeSplit && chunk.entryBits.equals(repr.meta.partMeta[repr.meta.nsExportPartIndex].entryBits) { - jsParts = appendOrExtendPartRange(jsParts, sourceIndex, repr.meta.nsExportPartIndex) + if canFileBeSplit && isFileInThisChunk && repr.AST.Parts[repr.Meta.NSExportPartIndex].IsLive { + jsParts = appendOrExtendPartRange(jsParts, sourceIndex, repr.Meta.NSExportPartIndex) } - for partIndex, part := range repr.ast.Parts { - isPartInThisChunk := chunk.entryBits.equals(repr.meta.partMeta[partIndex].entryBits) + for partIndex, part := range repr.AST.Parts { + isPartInThisChunk := isFileInThisChunk && repr.AST.Parts[partIndex].IsLive // Also traverse any files imported by this part for _, importRecordIndex := range part.ImportRecordIndices { - record := &repr.ast.ImportRecords[importRecordIndex] - if record.SourceIndex != nil && (record.Kind == ast.ImportStmt || isPartInThisChunk) { - if c.isExternalDynamicImport(record) { + record := &repr.AST.ImportRecords[importRecordIndex] + if record.SourceIndex.IsValid() && (record.Kind == ast.ImportStmt || isPartInThisChunk) { + if c.isExternalDynamicImport(record, sourceIndex) { // Don't follow import() dependencies continue } - visit(*record.SourceIndex) + visit(record.SourceIndex.GetIndex()) } } // Then include this part after the files it imports if isPartInThisChunk { isFileInThisChunk = true - if canFileBeSplit && uint32(partIndex) != repr.meta.nsExportPartIndex && c.shouldIncludePart(repr, part) { + if canFileBeSplit && 
uint32(partIndex) != repr.Meta.NSExportPartIndex && c.shouldIncludePart(repr, part) { if sourceIndex == runtime.SourceIndex { jsPartsPrefix = appendOrExtendPartRange(jsPartsPrefix, sourceIndex, uint32(partIndex)) } else { @@ -2918,17 +2843,17 @@ func (c *linkerContext) chunkFileOrder(chunk *chunkInfo) (js []uint32, jsParts [ jsPartsPrefix = append(jsPartsPrefix, partRange{ sourceIndex: sourceIndex, partIndexBegin: 0, - partIndexEnd: uint32(len(repr.ast.Parts)), + partIndexEnd: uint32(len(repr.AST.Parts)), }) } } - case *reprCSS: + case *graph.CSSRepr: if isFileInThisChunk { // All imported files come first - for _, record := range repr.ast.ImportRecords { - if record.SourceIndex != nil { - visit(*record.SourceIndex) + for _, record := range repr.AST.ImportRecords { + if record.SourceIndex.IsValid() { + visit(record.SourceIndex.GetIndex()) } } @@ -2950,121 +2875,100 @@ func (c *linkerContext) chunkFileOrder(chunk *chunkInfo) (js []uint32, jsParts [ func (c *linkerContext) shouldRemoveImportExportStmt( sourceIndex uint32, stmtList *stmtList, - partStmts []js_ast.Stmt, loc logger.Loc, namespaceRef js_ast.Ref, importRecordIndex uint32, ) bool { - // Is this an import from another module inside this bundle? - repr := c.files[sourceIndex].repr.(*reprJS) - record := &repr.ast.ImportRecords[importRecordIndex] - if record.SourceIndex != nil { - if !c.files[*record.SourceIndex].repr.(*reprJS).meta.cjsStyleExports { - // Remove the statement entirely if this is not a CommonJS module - return true + repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + record := &repr.AST.ImportRecords[importRecordIndex] + + // Is this an external import? + if !record.SourceIndex.IsValid() { + // Keep the "import" statement if "import" statements are supported + if c.options.OutputFormat.KeepES6ImportExportSyntax() { + return false } - } else if c.options.OutputFormat.KeepES6ImportExportSyntax() { - // If this is an external module and the output format allows ES6 - // import/export syntax, then just keep the statement - return false + + // Otherwise, replace this statement with a call to "require()" + stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{ + Loc: loc, + Data: &js_ast.SLocal{Decls: []js_ast.Decl{{ + Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: namespaceRef}}, + Value: &js_ast.Expr{Loc: record.Range.Loc, Data: &js_ast.ERequire{ImportRecordIndex: importRecordIndex}}, + }}}, + }) + return true } // We don't need a call to "require()" if this is a self-import inside a // CommonJS-style module, since we can just reference the exports directly. 
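A hedged sketch of the external-import decision implemented above: when the output format keeps ESM syntax the statement survives untouched, otherwise it is lowered to a require() call bound to the import's namespace symbol. Emitting strings here is a simplification; the real code builds js_ast nodes.

package main

import "fmt"

func lowerExternalImport(keepESMSyntax bool, nsName, path string) (stmt string, replaced bool) {
	if keepESMSyntax {
		return "", false // keep the original import statement untouched
	}
	return fmt.Sprintf("var %s = require(%q);", nsName, path), true
}

func main() {
	fmt.Println(lowerExternalImport(false, "ns", "pkg")) // var ns = require("pkg"); true
	fmt.Println(lowerExternalImport(true, "ns", "pkg"))  // false
}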
- if repr.meta.cjsStyleExports && js_ast.FollowSymbols(c.symbols, namespaceRef) == repr.ast.ExportsRef { + if repr.AST.ExportsKind == js_ast.ExportsCommonJS && js_ast.FollowSymbols(c.graph.Symbols, namespaceRef) == repr.AST.ExportsRef { return true } - // Replace the statement with a call to "require()" - stmtList.prefixStmts = append(stmtList.prefixStmts, js_ast.Stmt{ - Loc: loc, - Data: &js_ast.SLocal{Decls: []js_ast.Decl{{ - Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: namespaceRef}}, - Value: &js_ast.Expr{Loc: record.Range.Loc, Data: &js_ast.ERequire{ImportRecordIndex: importRecordIndex}}, - }}}, - }) + otherFile := &c.graph.Files[record.SourceIndex.GetIndex()] + otherRepr := otherFile.InputFile.Repr.(*graph.JSRepr) + switch otherRepr.Meta.Wrap { + case graph.WrapNone: + // Remove the statement entirely if this module is not wrapped + + case graph.WrapCJS: + // Replace the statement with a call to "require()" + stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{ + Loc: loc, + Data: &js_ast.SLocal{Decls: []js_ast.Decl{{ + Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: namespaceRef}}, + Value: &js_ast.Expr{Loc: record.Range.Loc, Data: &js_ast.ERequire{ImportRecordIndex: importRecordIndex}}, + }}}, + }) + + case graph.WrapESM: + // Ignore this file if it's not included in the bundle. This can happen for + // wrapped ESM files but not for wrapped CommonJS files because we allow + // tree shaking inside wrapped ESM files. + if !otherFile.IsLive { + break + } + + // Replace the statement with a call to "init()" + value := js_ast.Expr{Loc: loc, Data: &js_ast.ECall{Target: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: otherRepr.AST.WrapperRef}}}} + if otherRepr.Meta.IsAsyncOrHasAsyncDependency { + // This currently evaluates sibling dependencies in serial instead of in + // parallel, which is incorrect. This should be changed to store a promise + // and await all stored promises after all imports but before any code. 
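A sketch of the init-call lowering for WrapESM files described above: the import becomes a call to the wrapper (e.g. "init_foo()"), gaining "await" only when the module or a transitive dependency is async. String output is illustrative only.

package main

import "fmt"

// initCallFor lowers an import of a lazily-initialized ESM file to a call of
// its wrapper, awaited only when initialization can be asynchronous.
func initCallFor(wrapperName string, isAsync bool) string {
	call := wrapperName + "()"
	if isAsync {
		call = "await " + call
	}
	return call + ";"
}

func main() {
	fmt.Println(initCallFor("init_foo", false)) // init_foo();
	fmt.Println(initCallFor("init_bar", true))  // await init_bar();
}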
+ value.Data = &js_ast.EAwait{Value: value} + } + stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: value}}) + } + return true } func (c *linkerContext) convertStmtsForChunk(sourceIndex uint32, stmtList *stmtList, partStmts []js_ast.Stmt) { - file := &c.files[sourceIndex] - shouldStripExports := c.options.Mode != config.ModePassThrough || !file.isEntryPoint - repr := file.repr.(*reprJS) - shouldExtractES6StmtsForCJSWrap := repr.meta.cjsWrap + file := &c.graph.Files[sourceIndex] + shouldStripExports := c.options.Mode != config.ModePassThrough || !file.IsEntryPoint() + repr := file.InputFile.Repr.(*graph.JSRepr) + shouldExtractESMStmtsForWrap := repr.Meta.Wrap != graph.WrapNone for _, stmt := range partStmts { switch s := stmt.Data.(type) { case *js_ast.SImport: // "import * as ns from 'path'" // "import {foo} from 'path'" - if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, partStmts, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) { + if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) { continue } - // Make sure these don't end up in a CommonJS wrapper - if shouldExtractES6StmtsForCJSWrap { - stmtList.es6StmtsForCJSWrap = append(stmtList.es6StmtsForCJSWrap, stmt) + // Make sure these don't end up in the wrapper closure + if shouldExtractESMStmtsForWrap { + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt) continue } case *js_ast.SExportStar: - if s.Alias == nil { - // "export * from 'path'" - if shouldStripExports { - record := &repr.ast.ImportRecords[s.ImportRecordIndex] - - // Is this export star evaluated at run time? - if record.SourceIndex == nil && c.options.OutputFormat.KeepES6ImportExportSyntax() { - if record.CallsRunTimeExportStarFn { - // Turn this statement into "import * as ns from 'path'" - stmt.Data = &js_ast.SImport{ - NamespaceRef: s.NamespaceRef, - StarNameLoc: &stmt.Loc, - ImportRecordIndex: s.ImportRecordIndex, - } - - // Prefix this module with "__exportStar(exports, ns)" - exportStarRef := c.files[runtime.SourceIndex].repr.(*reprJS).ast.ModuleScope.Members["__exportStar"].Ref - stmtList.prefixStmts = append(stmtList.prefixStmts, js_ast.Stmt{ - Loc: stmt.Loc, - Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{ - Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: exportStarRef}}, - Args: []js_ast.Expr{ - {Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: repr.ast.ExportsRef}}, - {Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: s.NamespaceRef}}, - }, - }}}, - }) - - // Make sure these don't end up in a CommonJS wrapper - if shouldExtractES6StmtsForCJSWrap { - stmtList.es6StmtsForCJSWrap = append(stmtList.es6StmtsForCJSWrap, stmt) - continue - } - } - } else { - if record.CallsRunTimeExportStarFn { - // Prefix this module with "__exportStar(exports, require(path))" - exportStarRef := c.files[runtime.SourceIndex].repr.(*reprJS).ast.ModuleScope.Members["__exportStar"].Ref - stmtList.prefixStmts = append(stmtList.prefixStmts, js_ast.Stmt{ - Loc: stmt.Loc, - Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{ - Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: exportStarRef}}, - Args: []js_ast.Expr{ - {Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: repr.ast.ExportsRef}}, - {Loc: record.Range.Loc, Data: &js_ast.ERequire{ImportRecordIndex: s.ImportRecordIndex}}, - }, - }}}, - }) - } - - // Remove the export star statement - continue - } - } - } else { - // 
"export * as ns from 'path'" - if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, partStmts, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) { + // "export * as ns from 'path'" + if s.Alias != nil { + if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) { continue } @@ -3077,16 +2981,90 @@ func (c *linkerContext) convertStmtsForChunk(sourceIndex uint32, stmtList *stmtL } } - // Make sure these don't end up in a CommonJS wrapper - if shouldExtractES6StmtsForCJSWrap { - stmtList.es6StmtsForCJSWrap = append(stmtList.es6StmtsForCJSWrap, stmt) + // Make sure these don't end up in the wrapper closure + if shouldExtractESMStmtsForWrap { + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt) continue } + break + } + + // "export * from 'path'" + if !shouldStripExports { + break + } + record := &repr.AST.ImportRecords[s.ImportRecordIndex] + + // Is this export star evaluated at run time? + if !record.SourceIndex.IsValid() && c.options.OutputFormat.KeepES6ImportExportSyntax() { + if record.CallsRunTimeReExportFn { + // Turn this statement into "import * as ns from 'path'" + stmt.Data = &js_ast.SImport{ + NamespaceRef: s.NamespaceRef, + StarNameLoc: &stmt.Loc, + ImportRecordIndex: s.ImportRecordIndex, + } + + // Prefix this module with "__reExport(exports, ns)" + exportStarRef := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope.Members["__reExport"].Ref + stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{ + Loc: stmt.Loc, + Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{ + Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: exportStarRef}}, + Args: []js_ast.Expr{ + {Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}, + {Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: s.NamespaceRef}}, + }, + }}}, + }) + + // Make sure these don't end up in the wrapper closure + if shouldExtractESMStmtsForWrap { + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt) + continue + } + } + } else { + if record.SourceIndex.IsValid() { + if otherRepr := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr); otherRepr.Meta.Wrap == graph.WrapESM { + stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{Loc: stmt.Loc, + Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{ + Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: otherRepr.AST.WrapperRef}}}}}}) + } + } + + if record.CallsRunTimeReExportFn { + var target js_ast.E + if record.SourceIndex.IsValid() { + if otherRepr := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr); otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback { + // Prefix this module with "__reExport(exports, otherExports)" + target = &js_ast.EIdentifier{Ref: otherRepr.AST.ExportsRef} + } + } + if target == nil { + // Prefix this module with "__reExport(exports, require(path))" + target = &js_ast.ERequire{ImportRecordIndex: s.ImportRecordIndex} + } + exportStarRef := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope.Members["__reExport"].Ref + stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{ + Loc: stmt.Loc, + Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{ + Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: exportStarRef}}, + Args: []js_ast.Expr{ + {Loc: 
stmt.Loc, Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}, + {Loc: record.Range.Loc, Data: target}, + }, + }}}, + }) + } + + // Remove the export star statement + continue } case *js_ast.SExportFrom: // "export {foo} from 'path'" - if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, partStmts, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) { + if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) { continue } @@ -3103,9 +3081,9 @@ func (c *linkerContext) convertStmtsForChunk(sourceIndex uint32, stmtList *stmtL } } - // Make sure these don't end up in a CommonJS wrapper - if shouldExtractES6StmtsForCJSWrap { - stmtList.es6StmtsForCJSWrap = append(stmtList.es6StmtsForCJSWrap, stmt) + // Make sure these don't end up in the wrapper closure + if shouldExtractESMStmtsForWrap { + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt) continue } @@ -3115,9 +3093,9 @@ func (c *linkerContext) convertStmtsForChunk(sourceIndex uint32, stmtList *stmtL continue } - // Make sure these don't end up in a CommonJS wrapper - if shouldExtractES6StmtsForCJSWrap { - stmtList.es6StmtsForCJSWrap = append(stmtList.es6StmtsForCJSWrap, stmt) + // Make sure these don't end up in the wrapper closure + if shouldExtractESMStmtsForWrap { + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt) continue } @@ -3183,7 +3161,7 @@ func (c *linkerContext) convertStmtsForChunk(sourceIndex uint32, stmtList *stmtL } } - stmtList.normalStmts = append(stmtList.normalStmts, stmt) + stmtList.insideWrapperSuffix = append(stmtList.insideWrapperSuffix, stmt) } } @@ -3229,72 +3207,74 @@ func mergeAdjacentLocalStmts(stmts []js_ast.Stmt) []js_ast.Stmt { } type stmtList struct { - // These statements come first, and can be inside the CommonJS wrapper - prefixStmts []js_ast.Stmt + // These statements come first, and can be inside the wrapper + insideWrapperPrefix []js_ast.Stmt - // These statements come last, and can be inside the CommonJS wrapper - normalStmts []js_ast.Stmt + // These statements come last, and can be inside the wrapper + insideWrapperSuffix []js_ast.Stmt - // Order doesn't matter for these statements, but they must be outside any - // CommonJS wrapper since they are top-level ES6 import/export statements - es6StmtsForCJSWrap []js_ast.Stmt - - // These statements are for an entry point and come at the end of the chunk - entryPointTail []js_ast.Stmt -} - -type lineColumnOffset struct { - lines int - columns int + outsideWrapperPrefix []js_ast.Stmt } type compileResultJS struct { js_printer.PrintResult - // If this is an entry point, this is optional code to stick on the end of - // the chunk. This is used to for example trigger the lazily-evaluated - // CommonJS wrapper for the entry point. - entryPointTail *js_printer.PrintResult - sourceIndex uint32 // This is the line and column offset since the previous JavaScript string // or the start of the file if this is the first JavaScript string. 
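A stand-in illustration of what a line/column offset like generatedOffset tracks (this is not the sourcemap package's implementation): advance over emitted text, resetting the column counter at each newline.

package main

import "fmt"

type lineColumnOffset struct{ lines, columns int }

// advance walks emitted text, counting lines and resetting the column at
// every newline, which is all an offset like this needs to track.
func (o *lineColumnOffset) advance(text string) {
	for _, c := range text {
		if c == '\n' {
			o.lines++
			o.columns = 0
		} else {
			o.columns++
		}
	}
}

func main() {
	var o lineColumnOffset
	o.advance("var a = 1;\nvar b")
	fmt.Printf("%+v\n", o) // {lines:1 columns:5}
}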
- generatedOffset lineColumnOffset + generatedOffset sourcemap.LineColumnOffset +} + +func (c *linkerContext) requireOrImportMetaForSource(sourceIndex uint32) (meta js_printer.RequireOrImportMeta) { + repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + meta.WrapperRef = repr.AST.WrapperRef + meta.IsWrapperAsync = repr.Meta.IsAsyncOrHasAsyncDependency + if repr.Meta.Wrap == graph.WrapESM { + meta.ExportsRef = repr.AST.ExportsRef + } else { + meta.ExportsRef = js_ast.InvalidRef + } + return } func (c *linkerContext) generateCodeForFileInChunkJS( r renamer.Renamer, waitGroup *sync.WaitGroup, partRange partRange, - entryBits bitSet, + entryBits helpers.BitSet, chunkAbsDir string, commonJSRef js_ast.Ref, + esmRef js_ast.Ref, toModuleRef js_ast.Ref, result *compileResultJS, dataForSourceMaps []dataForSourceMap, ) { - file := &c.files[partRange.sourceIndex] - repr := file.repr.(*reprJS) - nsExportPartIndex := repr.meta.nsExportPartIndex + file := &c.graph.Files[partRange.sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + nsExportPartIndex := repr.Meta.NSExportPartIndex needsWrapper := false stmtList := stmtList{} // Make sure the generated call to "__export(exports, ...)" comes first // before anything else. if nsExportPartIndex >= partRange.partIndexBegin && nsExportPartIndex < partRange.partIndexEnd && - entryBits.equals(repr.meta.partMeta[nsExportPartIndex].entryBits) { - c.convertStmtsForChunk(partRange.sourceIndex, &stmtList, repr.ast.Parts[nsExportPartIndex].Stmts) + repr.AST.Parts[nsExportPartIndex].IsLive { + c.convertStmtsForChunk(partRange.sourceIndex, &stmtList, repr.AST.Parts[nsExportPartIndex].Stmts) // Move everything to the prefix list - stmtList.prefixStmts = append(stmtList.prefixStmts, stmtList.normalStmts...) - stmtList.normalStmts = nil + if repr.Meta.Wrap == graph.WrapESM { + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmtList.insideWrapperSuffix...) + } else { + stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, stmtList.insideWrapperSuffix...) + } + stmtList.insideWrapperSuffix = nil } // Add all other parts in this chunk for partIndex := partRange.partIndexBegin; partIndex < partRange.partIndexEnd; partIndex++ { - part := repr.ast.Parts[partIndex] - if !entryBits.equals(repr.meta.partMeta[partIndex].entryBits) { + part := repr.AST.Parts[partIndex] + if !repr.AST.Parts[partIndex].IsLive { // Skip the part if it's not in this chunk continue } @@ -3304,18 +3284,12 @@ func (c *linkerContext) generateCodeForFileInChunkJS( continue } - // Mark if we hit the dummy part representing the CommonJS wrapper - if repr.meta.cjsWrapperPartIndex != nil && uint32(partIndex) == *repr.meta.cjsWrapperPartIndex { + // Mark if we hit the dummy part representing the wrapper + if uint32(partIndex) == repr.Meta.WrapperPartIndex.GetIndex() { needsWrapper = true continue } - // Emit export statements in the entry point part verbatim - if repr.meta.entryPointExportPartIndex != nil && uint32(partIndex) == *repr.meta.entryPointExportPartIndex { - stmtList.entryPointTail = append(stmtList.entryPointTail, part.Stmts...) - continue - } - c.convertStmtsForChunk(partRange.sourceIndex, &stmtList, part.Stmts) } @@ -3325,55 +3299,128 @@ func (c *linkerContext) generateCodeForFileInChunkJS( // evaluated (well, except for cyclic import scenarios). We need to preserve // these semantics even when modules imported via ES6 import statements end // up being CommonJS modules. 
- stmts := stmtList.normalStmts - if len(stmtList.prefixStmts) > 0 { - stmts = append(stmtList.prefixStmts, stmts...) + stmts := stmtList.insideWrapperSuffix + if len(stmtList.insideWrapperPrefix) > 0 { + stmts = append(stmtList.insideWrapperPrefix, stmts...) } if c.options.MangleSyntax { stmts = mergeAdjacentLocalStmts(stmts) } - // Optionally wrap all statements in a closure for CommonJS + // Optionally wrap all statements in a closure if needsWrapper { - // Only include the arguments that are actually used - args := []js_ast.Arg{} - if repr.ast.UsesExportsRef || repr.ast.UsesModuleRef { - args = append(args, js_ast.Arg{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.ast.ExportsRef}}}) - if repr.ast.UsesModuleRef { - args = append(args, js_ast.Arg{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.ast.ModuleRef}}}) + switch repr.Meta.Wrap { + case graph.WrapCJS: + // Only include the arguments that are actually used + args := []js_ast.Arg{} + if repr.AST.UsesExportsRef || repr.AST.UsesModuleRef { + args = append(args, js_ast.Arg{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.ExportsRef}}}) + if repr.AST.UsesModuleRef { + args = append(args, js_ast.Arg{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.ModuleRef}}}) + } } - } - // "__commonJS((exports, module) => { ... })" - var value js_ast.Expr - if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { - value = js_ast.Expr{Data: &js_ast.ECall{ - Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: commonJSRef}}, - Args: []js_ast.Expr{{Data: &js_ast.EFunction{Fn: js_ast.Fn{Args: args, Body: js_ast.FnBody{Stmts: stmts}}}}}, - }} - } else { - value = js_ast.Expr{Data: &js_ast.ECall{ - Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: commonJSRef}}, - Args: []js_ast.Expr{{Data: &js_ast.EArrow{Args: args, Body: js_ast.FnBody{Stmts: stmts}}}}, - }} - } + // "__commonJS((exports, module) => { ... })" + var value js_ast.Expr + if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { + value = js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: commonJSRef}}, + Args: []js_ast.Expr{{Data: &js_ast.EFunction{Fn: js_ast.Fn{Args: args, Body: js_ast.FnBody{Stmts: stmts}}}}}, + }} + } else { + value = js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: commonJSRef}}, + Args: []js_ast.Expr{{Data: &js_ast.EArrow{Args: args, Body: js_ast.FnBody{Stmts: stmts}}}}, + }} + } - // "var require_foo = __commonJS((exports, module) => { ... });" - stmts = append(stmtList.es6StmtsForCJSWrap, js_ast.Stmt{Data: &js_ast.SLocal{ - Decls: []js_ast.Decl{{ - Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.ast.WrapperRef}}, - Value: &value, - }}, - }}) + // "var require_foo = __commonJS((exports, module) => { ... });" + stmts = append(stmtList.outsideWrapperPrefix, js_ast.Stmt{Data: &js_ast.SLocal{ + Decls: []js_ast.Decl{{ + Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.WrapperRef}}, + Value: &value, + }}, + }}) + + case graph.WrapESM: + // The wrapper only needs to be "async" if there is a transitive async + // dependency. For correctness, we must not use "async" if the module + // isn't async because then calling "require()" on that module would + // swallow any exceptions thrown during module initialization. 
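The closure-wrapping step above picks an arrow for the __commonJS callback unless the target lacks arrow support; a string-level sketch of that choice (the real code constructs js_ast nodes, and the variable name is illustrative):

package main

import "fmt"

func commonJSWrapper(body string, supportsArrow bool) string {
	if supportsArrow {
		return fmt.Sprintf("var require_foo = __commonJS((exports, module) => { %s });", body)
	}
	return fmt.Sprintf("var require_foo = __commonJS(function(exports, module) { %s });", body)
}

func main() {
	fmt.Println(commonJSWrapper("module.exports = 1;", true))
	fmt.Println(commonJSWrapper("module.exports = 1;", false))
}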
+ isAsync := repr.Meta.IsAsyncOrHasAsyncDependency + + // Hoist all top-level "var" and "function" declarations out of the closure + var decls []js_ast.Decl + end := 0 + for _, stmt := range stmts { + switch s := stmt.Data.(type) { + case *js_ast.SLocal: + // Convert the declarations to assignments + wrapIdentifier := func(loc logger.Loc, ref js_ast.Ref) js_ast.Expr { + decls = append(decls, js_ast.Decl{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: ref}}}) + return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}} + } + var value js_ast.Expr + for _, decl := range s.Decls { + binding := js_ast.ConvertBindingToExpr(decl.Binding, wrapIdentifier) + if decl.Value != nil { + value = js_ast.JoinWithComma(value, js_ast.Assign(binding, *decl.Value)) + } + } + if value.Data == nil { + continue + } + stmt = js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SExpr{Value: value}} + + case *js_ast.SFunction: + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt) + continue + } + + stmts[end] = stmt + end++ + } + stmts = stmts[:end] + + // "__esm(() => { ... })" + var value js_ast.Expr + if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { + value = js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: esmRef}}, + Args: []js_ast.Expr{{Data: &js_ast.EFunction{Fn: js_ast.Fn{Body: js_ast.FnBody{Stmts: stmts}, IsAsync: isAsync}}}}, + }} + } else { + value = js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: esmRef}}, + Args: []js_ast.Expr{{Data: &js_ast.EArrow{Body: js_ast.FnBody{Stmts: stmts}, IsAsync: isAsync}}}, + }} + } + + // "var foo, bar;" + if !c.options.MangleSyntax && len(decls) > 0 { + stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, js_ast.Stmt{Data: &js_ast.SLocal{ + Decls: decls, + }}) + decls = nil + } + + // "var init_foo = __esm(() => { ... 
});" + stmts = append(stmtList.outsideWrapperPrefix, js_ast.Stmt{Data: &js_ast.SLocal{ + Decls: append(decls, js_ast.Decl{ + Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.WrapperRef}}, + Value: &value, + }), + }}) + } } // Only generate a source map if needed var addSourceMappings bool var inputSourceMap *sourcemap.SourceMap var lineOffsetTables []js_printer.LineOffsetTable - if file.loader.CanHaveSourceMap() && c.options.SourceMap != config.SourceMapNone { + if file.InputFile.Loader.CanHaveSourceMap() && c.options.SourceMap != config.SourceMapNone { addSourceMappings = true - inputSourceMap = file.sourceMap + inputSourceMap = file.InputFile.InputSourceMap lineOffsetTables = dataForSourceMaps[partRange.sourceIndex].lineOffsetTables } @@ -3385,49 +3432,334 @@ func (c *linkerContext) generateCodeForFileInChunkJS( // Convert the AST to JavaScript code printOptions := js_printer.Options{ - Indent: indent, - OutputFormat: c.options.OutputFormat, - RemoveWhitespace: c.options.RemoveWhitespace, - MangleSyntax: c.options.MangleSyntax, - ASCIIOnly: c.options.ASCIIOnly, - ToModuleRef: toModuleRef, - ExtractComments: c.options.Mode == config.ModeBundle && c.options.RemoveWhitespace, - UnsupportedFeatures: c.options.UnsupportedJSFeatures, - AddSourceMappings: addSourceMappings, - InputSourceMap: inputSourceMap, - LineOffsetTables: lineOffsetTables, - WrapperRefForSource: func(sourceIndex uint32) js_ast.Ref { - return c.files[sourceIndex].repr.(*reprJS).ast.WrapperRef - }, + Indent: indent, + OutputFormat: c.options.OutputFormat, + RemoveWhitespace: c.options.RemoveWhitespace, + MangleSyntax: c.options.MangleSyntax, + ASCIIOnly: c.options.ASCIIOnly, + ToModuleRef: toModuleRef, + ExtractComments: c.options.Mode == config.ModeBundle && c.options.RemoveWhitespace, + UnsupportedFeatures: c.options.UnsupportedJSFeatures, + AddSourceMappings: addSourceMappings, + InputSourceMap: inputSourceMap, + LineOffsetTables: lineOffsetTables, + RequireOrImportMetaForSource: c.requireOrImportMetaForSource, } - tree := repr.ast + tree := repr.AST tree.Directive = "" // This is handled elsewhere tree.Parts = []js_ast.Part{{Stmts: stmts}} *result = compileResultJS{ - PrintResult: js_printer.Print(tree, c.symbols, r, printOptions), + PrintResult: js_printer.Print(tree, c.graph.Symbols, r, printOptions), sourceIndex: partRange.sourceIndex, } - // Write this separately as the entry point tail so it can be split off - // from the main entry point code. This is sometimes required to deal with - // CommonJS import cycles. 
- if len(stmtList.entryPointTail) > 0 { - tree := repr.ast - tree.Parts = []js_ast.Part{{Stmts: stmtList.entryPointTail}} - entryPointTail := js_printer.Print(tree, c.symbols, r, printOptions) - result.entryPointTail = &entryPointTail + waitGroup.Done() +} + +func (c *linkerContext) generateEntryPointTailJS( + r renamer.Renamer, + toModuleRef js_ast.Ref, + sourceIndex uint32, +) (result compileResultJS) { + file := &c.graph.Files[sourceIndex] + repr := file.InputFile.Repr.(*graph.JSRepr) + var stmts []js_ast.Stmt + + switch c.options.OutputFormat { + case config.FormatPreserve: + if repr.Meta.Wrap != graph.WrapNone { + // "require_foo();" + // "init_foo();" + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}, + }}}}) + } + + case config.FormatIIFE: + if repr.Meta.Wrap == graph.WrapCJS { + if len(c.options.GlobalName) > 0 { + // "return require_foo();" + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SReturn{Value: &js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}, + }}}}) + } else { + // "require_foo();" + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}, + }}}}) + } + } else { + if repr.Meta.Wrap == graph.WrapESM { + // "init_foo();" + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}, + }}}}) + } + if repr.Meta.ForceIncludeExportsForEntryPoint && len(c.options.GlobalName) > 0 { + // "return exports;" + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SReturn{ + Value: &js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}, + }}) + } + } + + case config.FormatCommonJS: + if repr.Meta.Wrap == graph.WrapCJS { + // "module.exports = require_foo();" + stmts = append(stmts, js_ast.AssignStmt( + js_ast.Expr{Data: &js_ast.EDot{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.unboundModuleRef}}, + Name: "exports", + }}, + js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}, + }}, + )) + } else if repr.Meta.Wrap == graph.WrapESM { + // "init_foo();" + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}, + }}}}) + } + + // If we are generating CommonJS for node, encode the known export names in + // a form that node can understand them. This relies on the specific behavior + // of this parser, which the node project uses to detect named exports in + // CommonJS files: https://github.com/guybedford/cjs-module-lexer. Think of + // this code as an annotation for that parser. + if c.options.Platform == config.PlatformNode && len(repr.Meta.SortedAndFilteredExportAliases) > 0 { + // Add a comment since otherwise people will surely wonder what this is. + // This annotation means you can do this and have it work: + // + // import { name } from './file-from-esbuild.cjs' + // + // when "file-from-esbuild.cjs" looks like this: + // + // __export(exports, { name: () => name }); + // 0 && (module.exports = {name}); + // + // The maintainer of "cjs-module-lexer" is receptive to adding esbuild- + // friendly patterns to this library. 
However, this library has already + // shipped in node and using existing patterns instead of defining new + // patterns is maximally compatible. + // + // An alternative to doing this could be to use "Object.defineProperties" + // instead of "__export" but support for that would need to be added to + // "cjs-module-lexer" and then we would need to be ok with not supporting + // older versions of node that don't have that newly-added support. + if !c.options.RemoveWhitespace { + stmts = append(stmts, + js_ast.Stmt{Data: &js_ast.SComment{Text: `// Annotate the CommonJS export names for ESM import in node:`}}, + ) + } + + // "{a, b, if: null}" + var moduleExports []js_ast.Property + for _, export := range repr.Meta.SortedAndFilteredExportAliases { + if export == "default" { + // In node the default export is always "module.exports" regardless of + // what the annotation says. So don't bother generating "default". + continue + } + + // "{if: null}" + var value *js_ast.Expr + if _, ok := js_lexer.Keywords[export]; ok { + // Make sure keywords don't cause a syntax error. This has to map to + // "null" instead of something shorter like "0" because the library + // "cjs-module-lexer" only supports identifiers in this position, and + // it thinks "null" is an identifier. + value = &js_ast.Expr{Data: &js_ast.ENull{}} + } + + moduleExports = append(moduleExports, js_ast.Property{ + Key: js_ast.Expr{Data: &js_ast.EString{Value: js_lexer.StringToUTF16(export)}}, + Value: value, + }) + } + + // "0 && (module.exports = {a, b, if: null});" + expr := js_ast.Expr{Data: &js_ast.EBinary{ + Op: js_ast.BinOpLogicalAnd, + Left: js_ast.Expr{Data: &js_ast.ENumber{Value: 0}}, + Right: js_ast.Assign( + js_ast.Expr{Data: &js_ast.EDot{ + Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.ModuleRef}}, + Name: "exports", + }}, + js_ast.Expr{Data: &js_ast.EObject{Properties: moduleExports}}, + ), + }} + + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: expr}}) + } + + case config.FormatESModule: + if repr.Meta.Wrap == graph.WrapCJS { + // "export default require_foo();" + stmts = append(stmts, js_ast.Stmt{ + Data: &js_ast.SExportDefault{Value: js_ast.ExprOrStmt{Expr: &js_ast.Expr{ + Data: &js_ast.ECall{Target: js_ast.Expr{ + Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}}}}}}) + } else { + if repr.Meta.Wrap == graph.WrapESM { + if repr.Meta.IsAsyncOrHasAsyncDependency { + // "await init_foo();" + stmts = append(stmts, js_ast.Stmt{ + Data: &js_ast.SExpr{Value: js_ast.Expr{ + Data: &js_ast.EAwait{Value: js_ast.Expr{ + Data: &js_ast.ECall{Target: js_ast.Expr{ + Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}}}}}}}) + } else { + // "init_foo();" + stmts = append(stmts, js_ast.Stmt{ + Data: &js_ast.SExpr{ + Value: js_ast.Expr{Data: &js_ast.ECall{Target: js_ast.Expr{ + Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}}}}}) + } + } + + if len(repr.Meta.SortedAndFilteredExportAliases) > 0 { + // If the output format is ES6 modules and we're an entry point, generate an + // ES6 export statement containing all exports. Except don't do that if this + // entry point is a CommonJS-style module, since that would generate an ES6 + // export statement that's not top-level. Instead, we will export the CommonJS + // exports as a default export later on. 
+ var items []js_ast.ClauseItem + + for i, alias := range repr.Meta.SortedAndFilteredExportAliases { + export := repr.Meta.ResolvedExports[alias] + + // If this is an export of an import, reference the symbol that the import + // was eventually resolved to. We need to do this because imports have + // already been resolved by this point, so we can't generate a new import + // and have that be resolved later. + if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[export.Ref]; ok { + export.Ref = importData.Ref + export.SourceIndex = importData.SourceIndex + } + + // Exports of imports need EImportIdentifier in case they need to be re- + // written to a property access later on + if c.graph.Symbols.Get(export.Ref).NamespaceAlias != nil { + // Create both a local variable and an export clause for that variable. + // The local variable is initialized with the initial value of the + // export. This isn't fully correct because it's a "dead" binding and + // doesn't update with the "live" value as it changes. But ES6 modules + // don't have any syntax for bare named getter functions so this is the + // best we can do. + // + // These input files: + // + // // entry_point.js + // export {foo} from './cjs-format.js' + // + // // cjs-format.js + // Object.defineProperty(exports, 'foo', { + // enumerable: true, + // get: () => Math.random(), + // }) + // + // Become this output file: + // + // // cjs-format.js + // var require_cjs_format = __commonJS((exports) => { + // Object.defineProperty(exports, "foo", { + // enumerable: true, + // get: () => Math.random() + // }); + // }); + // + // // entry_point.js + // var cjs_format = __toModule(require_cjs_format()); + // var export_foo = cjs_format.foo; + // export { + // export_foo as foo + // }; + // + tempRef := repr.Meta.CJSExportCopies[i] + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SLocal{ + Decls: []js_ast.Decl{{ + Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: tempRef}}, + Value: &js_ast.Expr{Data: &js_ast.EImportIdentifier{Ref: export.Ref}}, + }}, + }}) + items = append(items, js_ast.ClauseItem{ + Name: js_ast.LocRef{Ref: tempRef}, + Alias: alias, + }) + } else { + // Local identifiers can be exported using an export clause. This is done + // this way instead of leaving the "export" keyword on the local declaration + // itself both because it lets the local identifier be minified and because + // it works transparently for re-exports across files. 
+ // + // These input files: + // + // // entry_point.js + // export * from './esm-format.js' + // + // // esm-format.js + // export let foo = 123 + // + // Become this output file: + // + // // esm-format.js + // let foo = 123; + // + // // entry_point.js + // export { + // foo + // }; + // + items = append(items, js_ast.ClauseItem{ + Name: js_ast.LocRef{Ref: export.Ref}, + Alias: alias, + }) + } + } + + stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExportClause{Items: items}}) + } + } } - waitGroup.Done() + if len(stmts) == 0 { + return + } + + tree := repr.AST + tree.Parts = []js_ast.Part{{Stmts: stmts}} + + // Indent the file if everything is wrapped in an IIFE + indent := 0 + if c.options.OutputFormat == config.FormatIIFE { + indent++ + } + + // Convert the AST to JavaScript code + printOptions := js_printer.Options{ + Indent: indent, + OutputFormat: c.options.OutputFormat, + RemoveWhitespace: c.options.RemoveWhitespace, + MangleSyntax: c.options.MangleSyntax, + ASCIIOnly: c.options.ASCIIOnly, + ToModuleRef: toModuleRef, + ExtractComments: c.options.Mode == config.ModeBundle && c.options.RemoveWhitespace, + UnsupportedFeatures: c.options.UnsupportedJSFeatures, + RequireOrImportMetaForSource: c.requireOrImportMetaForSource, + } + result.PrintResult = js_printer.Print(tree, c.graph.Symbols, r, printOptions) + return } func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []uint32) renamer.Renamer { // Determine the reserved names (e.g. can't generate the name "if") moduleScopes := make([]*js_ast.Scope, len(filesInOrder)) for i, sourceIndex := range filesInOrder { - moduleScopes[i] = c.files[sourceIndex].repr.(*reprJS).ast.ModuleScope + moduleScopes[i] = c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope } - reservedNames := renamer.ComputeReservedNames(moduleScopes, c.symbols) + reservedNames := renamer.ComputeReservedNames(moduleScopes, c.graph.Symbols) // These are used to implement bundling, and need to be free for use if c.options.Mode != config.ModePassThrough { @@ -3440,32 +3772,32 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui // Determine the first top-level slot (i.e. 
not in a nested scope) var firstTopLevelSlots js_ast.SlotCounts for _, sourceIndex := range filesInOrder { - firstTopLevelSlots.UnionMax(c.files[sourceIndex].repr.(*reprJS).ast.NestedScopeSlotCounts) + firstTopLevelSlots.UnionMax(c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NestedScopeSlotCounts) } - r := renamer.NewMinifyRenamer(c.symbols, firstTopLevelSlots, reservedNames) + r := renamer.NewMinifyRenamer(c.graph.Symbols, firstTopLevelSlots, reservedNames) // Accumulate symbol usage counts into their slots freq := js_ast.CharFreq{} for _, sourceIndex := range filesInOrder { - repr := c.files[sourceIndex].repr.(*reprJS) - if repr.ast.CharFreq != nil { - freq.Include(repr.ast.CharFreq) + repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) + if repr.AST.CharFreq != nil { + freq.Include(repr.AST.CharFreq) } - if repr.ast.UsesExportsRef { - r.AccumulateSymbolCount(repr.ast.ExportsRef, 1) + if repr.AST.UsesExportsRef { + r.AccumulateSymbolCount(repr.AST.ExportsRef, 1) } - if repr.ast.UsesModuleRef { - r.AccumulateSymbolCount(repr.ast.ModuleRef, 1) + if repr.AST.UsesModuleRef { + r.AccumulateSymbolCount(repr.AST.ModuleRef, 1) } - for partIndex, part := range repr.ast.Parts { - if !chunk.entryBits.equals(repr.meta.partMeta[partIndex].entryBits) { + for partIndex, part := range repr.AST.Parts { + if !repr.AST.Parts[partIndex].IsLive { // Skip the part if it's not in this chunk continue } // Accumulate symbol use counts - r.AccumulateSymbolUseCounts(part.SymbolUses, c.stableSourceIndices) + r.AccumulateSymbolUseCounts(part.SymbolUses, c.graph.StableSourceIndices) // Make sure to also count the declaration in addition to the uses for _, declared := range part.DeclaredSymbols { @@ -3486,16 +3818,16 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui } // When we're not minifying, just append numbers to symbol names to avoid collisions - r := renamer.NewNumberRenamer(c.symbols, reservedNames) + r := renamer.NewNumberRenamer(c.graph.Symbols, reservedNames) nestedScopes := make(map[uint32][]*js_ast.Scope) // Make sure imports get a chance to be renamed var sorted renamer.StableRefArray - for _, imports := range chunk.repr.(*chunkReprJS).importsFromOtherChunks { + for _, imports := range chunk.chunkRepr.(*chunkReprJS).importsFromOtherChunks { for _, item := range imports { sorted = append(sorted, renamer.StableRef{ - StableOuterIndex: c.stableSourceIndices[item.ref.OuterIndex], - Ref: item.ref, + StableSourceIndex: c.graph.StableSourceIndices[item.ref.SourceIndex], + Ref: item.ref, }) } } @@ -3505,14 +3837,14 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui } for _, sourceIndex := range filesInOrder { - repr := c.files[sourceIndex].repr.(*reprJS) + repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) var scopes []*js_ast.Scope // Modules wrapped in a CommonJS closure look like this: // // // foo.js // var require_foo = __commonJS((exports, module) => { - // ... + // exports.foo = 123; // }); // // The symbol "require_foo" is stored in "file.ast.WrapperRef". We want @@ -3522,19 +3854,19 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui // not completely accurate (e.g. we don't set the parent of the module // scope to this new top-level scope) but it's good enough for the // renaming code. 
- if repr.meta.cjsWrap { - r.AddTopLevelSymbol(repr.ast.WrapperRef) + if repr.Meta.Wrap == graph.WrapCJS { + r.AddTopLevelSymbol(repr.AST.WrapperRef) // External import statements will be hoisted outside of the CommonJS // wrapper if the output format supports import statements. We need to // add those symbols to the top-level scope to avoid causing name // collisions. This code special-cases only those symbols. if c.options.OutputFormat.KeepES6ImportExportSyntax() { - for _, part := range repr.ast.Parts { + for _, part := range repr.AST.Parts { for _, stmt := range part.Stmts { switch s := stmt.Data.(type) { case *js_ast.SImport: - if repr.ast.ImportRecords[s.ImportRecordIndex].SourceIndex == nil { + if !repr.AST.ImportRecords[s.ImportRecordIndex].SourceIndex.IsValid() { r.AddTopLevelSymbol(s.NamespaceRef) if s.DefaultName != nil { r.AddTopLevelSymbol(s.DefaultName.Ref) @@ -3547,12 +3879,12 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui } case *js_ast.SExportStar: - if repr.ast.ImportRecords[s.ImportRecordIndex].SourceIndex == nil { + if !repr.AST.ImportRecords[s.ImportRecordIndex].SourceIndex.IsValid() { r.AddTopLevelSymbol(s.NamespaceRef) } case *js_ast.SExportFrom: - if repr.ast.ImportRecords[s.ImportRecordIndex].SourceIndex == nil { + if !repr.AST.ImportRecords[s.ImportRecordIndex].SourceIndex.IsValid() { r.AddTopLevelSymbol(s.NamespaceRef) for _, item := range s.Items { r.AddTopLevelSymbol(item.Name.Ref) @@ -3563,13 +3895,31 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui } } - nestedScopes[sourceIndex] = []*js_ast.Scope{repr.ast.ModuleScope} + nestedScopes[sourceIndex] = []*js_ast.Scope{repr.AST.ModuleScope} continue } + // Modules wrapped in an ESM closure look like this: + // + // // foo.js + // var foo, foo_exports = {}; + // __exports(foo_exports, { + // foo: () => foo + // }); + // let init_foo = __esm(() => { + // foo = 123; + // }); + // + // The symbol "init_foo" is stored in "file.ast.WrapperRef". We need to + // minify everything inside the closure without introducing a new scope + // since all top-level variables will be hoisted outside of the closure. 
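+		// Registering the wrapper symbol as top-level keeps the renamer from
+		// handing its name to one of the variables hoisted out of the closure
+		// (e.g. a minified "init_foo" colliding with a minified "foo").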
+ if repr.Meta.Wrap == graph.WrapESM { + r.AddTopLevelSymbol(repr.AST.WrapperRef) + } + // Rename each top-level symbol declaration in this chunk - for partIndex, part := range repr.ast.Parts { - if chunk.entryBits.equals(repr.meta.partMeta[partIndex].entryBits) { + for partIndex, part := range repr.AST.Parts { + if repr.AST.Parts[partIndex].IsLive { for _, declared := range part.DeclaredSymbols { if declared.IsTopLevel { r.AddTopLevelSymbol(declared.Ref) @@ -3589,16 +3939,26 @@ func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []ui return r } -func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(generateContinue) []OutputFile { - var results []OutputFile +func (c *linkerContext) generateChunkJS(chunks []chunkInfo, chunkIndex int, chunkWaitGroup *sync.WaitGroup) { + chunk := &chunks[chunkIndex] + chunkRepr := chunk.chunkRepr.(*chunkReprJS) compileResults := make([]compileResultJS, 0, len(chunk.partsInChunkInOrder)) - runtimeMembers := c.files[runtime.SourceIndex].repr.(*reprJS).ast.ModuleScope.Members - commonJSRef := js_ast.FollowSymbols(c.symbols, runtimeMembers["__commonJS"].Ref) - toModuleRef := js_ast.FollowSymbols(c.symbols, runtimeMembers["__toModule"].Ref) + runtimeMembers := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope.Members + commonJSRef := js_ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__commonJS"].Ref) + esmRef := js_ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__esm"].Ref) + toModuleRef := js_ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__toModule"].Ref) r := c.renameSymbolsInChunk(chunk, chunk.filesInChunkInOrder) - chunkAbsDir := c.fs.Join(c.options.AbsOutputDir, chunk.relDir) dataForSourceMaps := c.dataForSourceMaps() + // Note: This contains placeholders instead of what the placeholders are + // substituted with. That should be fine though because this should only + // ever be used for figuring out how many "../" to add to a relative path + // from a chunk whose final path hasn't been calculated yet to a chunk + // whose final path has already been calculated. That and placeholders are + // never substituted with something containing a "/" so substitution should + // never change the "../" count. + chunkAbsDir := c.fs.Dir(c.fs.Join(c.options.AbsOutputDir, config.TemplateToString(chunk.finalTemplate))) + // Generate JavaScript for each file in parallel waitGroup := sync.WaitGroup{} for _, partRange := range chunk.partsInChunkInOrder { @@ -3618,393 +3978,339 @@ func (repr *chunkReprJS) generate(c *linkerContext, chunk *chunkInfo) func(gener chunk.entryBits, chunkAbsDir, commonJSRef, + esmRef, toModuleRef, compileResult, dataForSourceMaps, ) } - // Each file may optionally contain additional files to be copied to the - // output directory. This is used by the "file" loader. - for _, sourceIndex := range chunk.filesInChunkInOrder { - results = append(results, c.files[sourceIndex].additionalFiles...) 
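+	// Note: the import records built below point at each chunk's uniqueKey
+	// placeholder rather than its final output path; the real paths are
+	// substituted later, once the content hashes are known.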
+ // Also generate the cross-chunk binding code + var crossChunkPrefix []byte + var crossChunkSuffix []byte + { + // Indent the file if everything is wrapped in an IIFE + indent := 0 + if c.options.OutputFormat == config.FormatIIFE { + indent++ + } + printOptions := js_printer.Options{ + Indent: indent, + OutputFormat: c.options.OutputFormat, + RemoveWhitespace: c.options.RemoveWhitespace, + MangleSyntax: c.options.MangleSyntax, + } + crossChunkImportRecords := make([]ast.ImportRecord, len(chunk.crossChunkImports)) + for i, chunkImport := range chunk.crossChunkImports { + crossChunkImportRecords[i] = ast.ImportRecord{ + Kind: chunkImport.importKind, + Path: logger.Path{Text: chunks[chunkImport.chunkIndex].uniqueKey}, + } + } + crossChunkPrefix = js_printer.Print(js_ast.AST{ + ImportRecords: crossChunkImportRecords, + Parts: []js_ast.Part{{Stmts: chunkRepr.crossChunkPrefixStmts}}, + }, c.graph.Symbols, r, printOptions).JS + crossChunkSuffix = js_printer.Print(js_ast.AST{ + Parts: []js_ast.Part{{Stmts: chunkRepr.crossChunkSuffixStmts}}, + }, c.graph.Symbols, r, printOptions).JS } - // Wait for cross-chunk import records before continuing - return func(continueData generateContinue) []OutputFile { - // Also generate the cross-chunk binding code - var crossChunkPrefix []byte - var crossChunkSuffix []byte - { - // Indent the file if everything is wrapped in an IIFE - indent := 0 - if c.options.OutputFormat == config.FormatIIFE { - indent++ - } - printOptions := js_printer.Options{ - Indent: indent, - OutputFormat: c.options.OutputFormat, - RemoveWhitespace: c.options.RemoveWhitespace, - MangleSyntax: c.options.MangleSyntax, - } - crossChunkPrefix = js_printer.Print(js_ast.AST{ - ImportRecords: continueData.crossChunkImportRecords, - Parts: []js_ast.Part{{Stmts: repr.crossChunkPrefixStmts}}, - }, c.symbols, r, printOptions).JS - crossChunkSuffix = js_printer.Print(js_ast.AST{ - Parts: []js_ast.Part{{Stmts: repr.crossChunkSuffixStmts}}, - }, c.symbols, r, printOptions).JS - } + // Generate the exports for the entry point, if there are any + var entryPointTail compileResultJS + if chunk.isEntryPoint { + entryPointTail = c.generateEntryPointTailJS( + r, + toModuleRef, + chunk.sourceIndex, + ) + } - waitGroup.Wait() + waitGroup.Wait() - j := js_printer.Joiner{} - prevOffset := lineColumnOffset{} + j := helpers.Joiner{} + prevOffset := sourcemap.LineColumnOffset{} - // Optionally strip whitespace - indent := "" - space := " " - newline := "\n" - if c.options.RemoveWhitespace { - space = "" - newline = "" - } - newlineBeforeComment := false - isExecutable := false + // Optionally strip whitespace + indent := "" + space := " " + newline := "\n" + if c.options.RemoveWhitespace { + space = "" + newline = "" + } + newlineBeforeComment := false + isExecutable := false - if chunk.isEntryPoint { - repr := c.files[chunk.sourceIndex].repr.(*reprJS) + if chunk.isEntryPoint { + repr := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr) - // Start with the hashbang if there is one - if repr.ast.Hashbang != "" { - hashbang := repr.ast.Hashbang + "\n" - prevOffset.advanceString(hashbang) - j.AddString(hashbang) - newlineBeforeComment = true - isExecutable = true - } - - // Add the top-level directive if present - if repr.ast.Directive != "" { - quoted := string(js_printer.QuoteForJSON(repr.ast.Directive, c.options.ASCIIOnly)) + ";" + newline - prevOffset.advanceString(quoted) - j.AddString(quoted) - newlineBeforeComment = true - } - } - - if len(c.options.Banner) > 0 { - 
prevOffset.advanceString(c.options.Banner) - prevOffset.advanceString("\n") - j.AddString(c.options.Banner) - j.AddString("\n") - } - - // Optionally wrap with an IIFE - if c.options.OutputFormat == config.FormatIIFE { - var text string - indent = " " - if len(c.options.GlobalName) > 0 { - text = c.generateGlobalNamePrefix() - } - if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { - text += "(function()" + space + "{" + newline - } else { - text += "(()" + space + "=>" + space + "{" + newline - } - prevOffset.advanceString(text) - j.AddString(text) - newlineBeforeComment = false - } - - // Put the cross-chunk prefix inside the IIFE - if len(crossChunkPrefix) > 0 { + // Start with the hashbang if there is one + if repr.AST.Hashbang != "" { + hashbang := repr.AST.Hashbang + "\n" + prevOffset.AdvanceString(hashbang) + j.AddString(hashbang) newlineBeforeComment = true - prevOffset.advanceBytes(crossChunkPrefix) - j.AddBytes(crossChunkPrefix) + isExecutable = true } - // Start the metadata - jMeta := js_printer.Joiner{} - if c.options.AbsMetadataFile != "" { - // Print imports - isFirstMeta := true - jMeta.AddString("{\n \"imports\": [") - for i, importAbsPath := range continueData.crossChunkAbsPaths { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") - } - jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", - js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: importAbsPath, Namespace: "file"}), c.options.ASCIIOnly), - js_printer.QuoteForJSON(continueData.crossChunkImportRecords[i].Kind.StringForMetafile(), c.options.ASCIIOnly))) - } - if !isFirstMeta { - jMeta.AddString("\n ") - } + // Add the top-level directive if present + if repr.AST.Directive != "" { + quoted := string(js_printer.QuoteForJSON(repr.AST.Directive, c.options.ASCIIOnly)) + ";" + newline + prevOffset.AdvanceString(quoted) + j.AddString(quoted) + newlineBeforeComment = true + } + } - // Print exports - jMeta.AddString("],\n \"exports\": [") - var aliases []string - if c.options.OutputFormat.KeepES6ImportExportSyntax() { - if chunk.isEntryPoint { - if fileRepr := c.files[chunk.sourceIndex].repr.(*reprJS); fileRepr.meta.cjsWrap { - aliases = []string{"default"} - } else { - resolvedExports := fileRepr.meta.resolvedExports - aliases = make([]string, 0, len(resolvedExports)) - for alias := range resolvedExports { - if alias != "*" { - aliases = append(aliases, alias) - } - } - } + if len(c.options.JSBanner) > 0 { + prevOffset.AdvanceString(c.options.JSBanner) + prevOffset.AdvanceString("\n") + j.AddString(c.options.JSBanner) + j.AddString("\n") + } + + // Optionally wrap with an IIFE + if c.options.OutputFormat == config.FormatIIFE { + var text string + indent = " " + if len(c.options.GlobalName) > 0 { + text = c.generateGlobalNamePrefix() + } + if c.options.UnsupportedJSFeatures.Has(compat.Arrow) { + text += "(function()" + space + "{" + newline + } else { + text += "(()" + space + "=>" + space + "{" + newline + } + prevOffset.AdvanceString(text) + j.AddString(text) + newlineBeforeComment = false + } + + // Put the cross-chunk prefix inside the IIFE + if len(crossChunkPrefix) > 0 { + newlineBeforeComment = true + prevOffset.AdvanceBytes(crossChunkPrefix) + j.AddBytes(crossChunkPrefix) + } + + // Start the metadata + jMeta := helpers.Joiner{} + if c.options.NeedsMetafile { + // Print imports + isFirstMeta := true + jMeta.AddString("{\n \"imports\": [") + for _, chunkImport := range chunk.crossChunkImports { + if isFirstMeta { + isFirstMeta = false + } else { + 
jMeta.AddString(",") + } + jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", + js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: chunks[chunkImport.chunkIndex].uniqueKey, Namespace: "file"}), c.options.ASCIIOnly), + js_printer.QuoteForJSON(chunkImport.importKind.StringForMetafile(), c.options.ASCIIOnly))) + } + if !isFirstMeta { + jMeta.AddString("\n ") + } + + // Print exports + jMeta.AddString("],\n \"exports\": [") + var aliases []string + if c.options.OutputFormat.KeepES6ImportExportSyntax() { + if chunk.isEntryPoint { + if fileRepr := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr); fileRepr.Meta.Wrap == graph.WrapCJS { + aliases = []string{"default"} } else { - aliases = make([]string, 0, len(repr.exportsToOtherChunks)) - for _, alias := range repr.exportsToOtherChunks { + resolvedExports := fileRepr.Meta.ResolvedExports + aliases = make([]string, 0, len(resolvedExports)) + for alias := range resolvedExports { aliases = append(aliases, alias) } } - } - isFirstMeta = true - sort.Strings(aliases) // Sort for determinism - for _, alias := range aliases { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") + } else { + aliases = make([]string, 0, len(chunkRepr.exportsToOtherChunks)) + for _, alias := range chunkRepr.exportsToOtherChunks { + aliases = append(aliases, alias) } - jMeta.AddString(fmt.Sprintf("\n %s", - js_printer.QuoteForJSON(alias, c.options.ASCIIOnly))) } - if !isFirstMeta { - jMeta.AddString("\n ") + } + isFirstMeta = true + sort.Strings(aliases) // Sort for determinism + for _, alias := range aliases { + if isFirstMeta { + isFirstMeta = false + } else { + jMeta.AddString(",") } + jMeta.AddString(fmt.Sprintf("\n %s", + js_printer.QuoteForJSON(alias, c.options.ASCIIOnly))) + } + if !isFirstMeta { + jMeta.AddString("\n ") + } + if chunk.isEntryPoint { + entryPoint := c.graph.Files[chunk.sourceIndex].InputFile.Source.PrettyPath + jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", js_printer.QuoteForJSON(entryPoint, c.options.ASCIIOnly))) + } else { jMeta.AddString("],\n \"inputs\": {") } + } - // Concatenate the generated JavaScript chunks together - var compileResultsForSourceMap []compileResultJS - var entryPointTail *js_printer.PrintResult - var commentList []string - var metaOrder []string - var metaByteCount map[string]int - commentSet := make(map[string]bool) - prevComment := uint32(0) - if c.options.AbsMetadataFile != "" { - metaOrder = make([]string, 0, len(compileResults)) - metaByteCount = make(map[string]int, len(compileResults)) - } - for _, compileResult := range compileResults { - isRuntime := compileResult.sourceIndex == runtime.SourceIndex - for text := range compileResult.ExtractedComments { - if !commentSet[text] { - commentSet[text] = true - commentList = append(commentList, text) - } - } - - // If this is the entry point, it may have some extra code to stick at the - // end of the chunk after all modules have evaluated - if compileResult.entryPointTail != nil { - entryPointTail = compileResult.entryPointTail - } - - // Add a comment with the file path before the file contents - if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace && prevComment != compileResult.sourceIndex && len(compileResult.JS) > 0 { - if newlineBeforeComment { - prevOffset.advanceString("\n") - j.AddString("\n") - } - - path := c.files[compileResult.sourceIndex].source.PrettyPath - - // Make sure newlines in the path can't cause a syntax error. 
This does - // not minimize allocations because it's expected that this case never - // comes up in practice. - path = strings.ReplaceAll(path, "\r", "\\r") - path = strings.ReplaceAll(path, "\n", "\\n") - path = strings.ReplaceAll(path, "\u2028", "\\u2028") - path = strings.ReplaceAll(path, "\u2029", "\\u2029") - - text := fmt.Sprintf("%s// %s\n", indent, path) - prevOffset.advanceString(text) - j.AddString(text) - prevComment = compileResult.sourceIndex - } - - // Don't include the runtime in source maps - if isRuntime { - prevOffset.advanceString(string(compileResult.JS)) - j.AddBytes(compileResult.JS) - } else { - // Save the offset to the start of the stored JavaScript - compileResult.generatedOffset = prevOffset - j.AddBytes(compileResult.JS) - - // Ignore empty source map chunks - if compileResult.SourceMapChunk.ShouldIgnore { - prevOffset.advanceBytes(compileResult.JS) - } else { - prevOffset = lineColumnOffset{} - - // Include this file in the source map - if c.options.SourceMap != config.SourceMapNone { - compileResultsForSourceMap = append(compileResultsForSourceMap, compileResult) - } - } - - // Include this file in the metadata - if c.options.AbsMetadataFile != "" { - // Accumulate file sizes since a given file may be split into multiple parts - path := c.files[compileResult.sourceIndex].source.PrettyPath - if count, ok := metaByteCount[path]; ok { - metaByteCount[path] = count + len(compileResult.JS) - } else { - metaOrder = append(metaOrder, path) - metaByteCount[path] = len(compileResult.JS) - } - } - } - - // Put a newline before the next file path comment - if len(compileResult.JS) > 0 { - newlineBeforeComment = true + // Concatenate the generated JavaScript chunks together + var compileResultsForSourceMap []compileResultJS + var commentList []string + var metaOrder []uint32 + var metaByteCount map[string]int + commentSet := make(map[string]bool) + prevComment := uint32(0) + if c.options.NeedsMetafile { + metaOrder = make([]uint32, 0, len(compileResults)) + metaByteCount = make(map[string]int, len(compileResults)) + } + for _, compileResult := range compileResults { + isRuntime := compileResult.sourceIndex == runtime.SourceIndex + for text := range compileResult.ExtractedComments { + if !commentSet[text] { + commentSet[text] = true + commentList = append(commentList, text) } } - // Stick the entry point tail at the end of the file. Deliberately don't - // include any source mapping information for this because it's automatically - // generated and doesn't correspond to a location in the input file. - if entryPointTail != nil { - j.AddBytes(entryPointTail.JS) - } - - // Put the cross-chunk suffix inside the IIFE - if len(crossChunkSuffix) > 0 { + // Add a comment with the file path before the file contents + if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace && prevComment != compileResult.sourceIndex && len(compileResult.JS) > 0 { if newlineBeforeComment { - j.AddString(newline) - } - j.AddBytes(crossChunkSuffix) - } - - // Optionally wrap with an IIFE - if c.options.OutputFormat == config.FormatIIFE { - j.AddString("})();" + newline) - } - - // Make sure the file ends with a newline - if j.Length() > 0 && j.LastByte() != '\n' { - j.AddString("\n") - } - - // Add all unique license comments to the end of the file. These are - // deduplicated because some projects have thousands of files with the same - // comment. The comment must be preserved in the output for legal reasons but - // at the same time we want to generate a small bundle when minifying. 
- sort.Strings(commentList) - for _, text := range commentList { - j.AddString(text) - j.AddString("\n") - } - - if len(c.options.Footer) > 0 { - j.AddString(c.options.Footer) - j.AddString("\n") - } - - if c.options.SourceMap != config.SourceMapNone { - sourceMap := c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps) - var writeDataURL bool - var writeFile bool - switch c.options.SourceMap { - case config.SourceMapInline: - writeDataURL = true - case config.SourceMapLinkedWithComment, config.SourceMapExternalWithoutComment: - writeFile = true - case config.SourceMapInlineAndExternal: - writeDataURL = true - writeFile = true - } - - // Write the generated source map as an inline comment - if writeDataURL { - j.AddString("//# sourceMappingURL=data:application/json;base64,") - j.AddString(base64.StdEncoding.EncodeToString(sourceMap)) + prevOffset.AdvanceString("\n") j.AddString("\n") } - // Write the generated source map as an external file - if writeFile { - // Optionally add metadata about the file - var jsonMetadataChunk []byte - if c.options.AbsMetadataFile != "" { - jsonMetadataChunk = []byte(fmt.Sprintf( - "{\n \"imports\": [],\n \"exports\": [],\n \"inputs\": {},\n \"bytes\": %d\n }", len(sourceMap))) - } + path := c.graph.Files[compileResult.sourceIndex].InputFile.Source.PrettyPath - // Figure out the base name for the source map which may include the content hash - var sourceMapBaseName string - if chunk.baseNameOrEmpty == "" { - hash := hashForFileName(sourceMap) - sourceMapBaseName = "chunk." + hash + c.options.OutputExtensionJS + ".map" + // Make sure newlines in the path can't cause a syntax error. This does + // not minimize allocations because it's expected that this case never + // comes up in practice. 
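+			// These replacements cover all four JavaScript line terminators
+			// (CR, LF, U+2028, U+2029); any one of them would end the "//"
+			// comment early and leak the rest of the path into the code.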
+ path = strings.ReplaceAll(path, "\r", "\\r") + path = strings.ReplaceAll(path, "\n", "\\n") + path = strings.ReplaceAll(path, "\u2028", "\\u2028") + path = strings.ReplaceAll(path, "\u2029", "\\u2029") + + text := fmt.Sprintf("%s// %s\n", indent, path) + prevOffset.AdvanceString(text) + j.AddString(text) + prevComment = compileResult.sourceIndex + } + + // Don't include the runtime in source maps + if isRuntime { + prevOffset.AdvanceString(string(compileResult.JS)) + j.AddBytes(compileResult.JS) + } else { + // Save the offset to the start of the stored JavaScript + compileResult.generatedOffset = prevOffset + j.AddBytes(compileResult.JS) + + // Ignore empty source map chunks + if compileResult.SourceMapChunk.ShouldIgnore { + prevOffset.AdvanceBytes(compileResult.JS) + } else { + prevOffset = sourcemap.LineColumnOffset{} + + // Include this file in the source map + if c.options.SourceMap != config.SourceMapNone { + compileResultsForSourceMap = append(compileResultsForSourceMap, compileResult) + } + } + + // Include this file in the metadata + if c.options.NeedsMetafile { + // Accumulate file sizes since a given file may be split into multiple parts + path := c.graph.Files[compileResult.sourceIndex].InputFile.Source.PrettyPath + if count, ok := metaByteCount[path]; ok { + metaByteCount[path] = count + len(compileResult.JS) } else { - sourceMapBaseName = chunk.baseNameOrEmpty + ".map" + metaOrder = append(metaOrder, compileResult.sourceIndex) + metaByteCount[path] = len(compileResult.JS) } - - // Add a comment linking the source to its map - if c.options.SourceMap == config.SourceMapLinkedWithComment { - j.AddString("//# sourceMappingURL=") - j.AddString(sourceMapBaseName) - j.AddString("\n") - } - - results = append(results, OutputFile{ - AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.relDir, sourceMapBaseName), - Contents: sourceMap, - jsonMetadataChunk: jsonMetadataChunk, - }) } } - // The JavaScript contents are done now that the source map comment is in - jsContents := j.Done() - - // Figure out the base name for this chunk now that the content hash is known - if chunk.baseNameOrEmpty == "" { - hash := hashForFileName(jsContents) - chunk.baseNameOrEmpty = "chunk." + hash + c.options.OutputExtensionJS + // Put a newline before the next file path comment + if len(compileResult.JS) > 0 { + newlineBeforeComment = true } + } - // End the metadata - var jsonMetadataChunk []byte - if c.options.AbsMetadataFile != "" { + // Stick the entry point tail at the end of the file. Deliberately don't + // include any source mapping information for this because it's automatically + // generated and doesn't correspond to a location in the input file. + j.AddBytes(entryPointTail.JS) + + // Put the cross-chunk suffix inside the IIFE + if len(crossChunkSuffix) > 0 { + if newlineBeforeComment { + j.AddString(newline) + } + j.AddBytes(crossChunkSuffix) + } + + // Optionally wrap with an IIFE + if c.options.OutputFormat == config.FormatIIFE { + j.AddString("})();" + newline) + } + + // Make sure the file ends with a newline + j.EnsureNewlineAtEnd() + + // Add all unique license comments to the end of the file. These are + // deduplicated because some projects have thousands of files with the same + // comment. The comment must be preserved in the output for legal reasons but + // at the same time we want to generate a small bundle when minifying. 
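+	// commentList was collected from a map-backed set, so sort it here to
+	// keep the emitted order deterministic across builds.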
+ sort.Strings(commentList) + for _, text := range commentList { + j.AddString(text) + j.AddString("\n") + } + + if len(c.options.JSFooter) > 0 { + j.AddString(c.options.JSFooter) + j.AddString("\n") + } + + if c.options.SourceMap != config.SourceMapNone { + chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps) + } + + // The JavaScript contents are done now that the source map comment is in + jsContents := j.Done() + + // End the metadata lazily. The final output size is not known until the + // final import paths are substituted into the output pieces generated below. + if c.options.NeedsMetafile { + chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { isFirstMeta := true - for _, path := range metaOrder { + for _, sourceIndex := range metaOrder { if isFirstMeta { isFirstMeta = false } else { jMeta.AddString(",") } - jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", - js_printer.QuoteForJSON(path, c.options.ASCIIOnly), metaByteCount[path])) + path := c.graph.Files[sourceIndex].InputFile.Source.PrettyPath + extra := c.generateExtraDataForFileJS(sourceIndex) + jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n %s}", + js_printer.QuoteForJSON(path, c.options.ASCIIOnly), metaByteCount[path], extra)) } if !isFirstMeta { jMeta.AddString("\n ") } - jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", len(jsContents))) - jsonMetadataChunk = jMeta.Done() + jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) + return jMeta.Done() } - - results = append(results, OutputFile{ - AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.relPath()), - Contents: jsContents, - jsonMetadataChunk: jsonMetadataChunk, - IsExecutable: isExecutable, - }) - return results } + + chunk.outputPieces = c.breakOutputIntoPieces(jsContents, uint32(len(chunks))) + c.generateIsolatedHashInParallel(chunk) + chunk.isExecutable = isExecutable + chunkWaitGroup.Done() } func (c *linkerContext) generateGlobalNamePrefix() string { @@ -4045,30 +4351,31 @@ func (c *linkerContext) generateGlobalNamePrefix() string { } type compileResultCSS struct { - printedCSS string - sourceIndex uint32 - hasCharset bool - externalImportRecords []ast.ImportRecord + printedCSS string + sourceIndex uint32 + hasCharset bool + externalImports []externalImportCSS } -func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(generateContinue) []OutputFile { - var results []OutputFile +type externalImportCSS struct { + record ast.ImportRecord + conditions []css_ast.Token +} + +func (c *linkerContext) generateChunkCSS(chunks []chunkInfo, chunkIndex int, chunkWaitGroup *sync.WaitGroup) { + chunk := &chunks[chunkIndex] compileResults := make([]compileResultCSS, 0, len(chunk.filesInChunkInOrder)) // Generate CSS for each file in parallel waitGroup := sync.WaitGroup{} for _, sourceIndex := range chunk.filesInChunkInOrder { - // Each file may optionally contain additional files to be copied to the - // output directory. This is used by the "file" loader. - results = append(results, c.files[sourceIndex].additionalFiles...) 
- // Create a goroutine for this file compileResults = append(compileResults, compileResultCSS{}) compileResult := &compileResults[len(compileResults)-1] waitGroup.Add(1) go func(sourceIndex uint32, compileResult *compileResultCSS) { - file := &c.files[sourceIndex] - ast := file.repr.(*reprCSS).ast + file := &c.graph.Files[sourceIndex] + ast := file.InputFile.Repr.(*graph.CSSRepr).AST // Filter out "@import" rules rules := make([]css_ast.R, 0, len(ast.Rules)) @@ -4078,8 +4385,11 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(gene compileResult.hasCharset = true continue case *css_ast.RAtImport: - if record := ast.ImportRecords[r.ImportRecordIndex]; record.SourceIndex == nil { - compileResult.externalImportRecords = append(compileResult.externalImportRecords, record) + if record := ast.ImportRecords[r.ImportRecordIndex]; !record.SourceIndex.IsValid() { + compileResult.externalImports = append(compileResult.externalImports, externalImportCSS{ + record: record, + conditions: r.ImportConditions, + }) } continue } @@ -4096,145 +4406,308 @@ func (repr *chunkReprCSS) generate(c *linkerContext, chunk *chunkInfo) func(gene }(sourceIndex, compileResult) } - // Wait for cross-chunk import records before continuing - return func(continueData generateContinue) []OutputFile { - waitGroup.Wait() - j := js_printer.Joiner{} - newlineBeforeComment := false + waitGroup.Wait() + j := helpers.Joiner{} + newlineBeforeComment := false - // Generate any prefix rules now - { - ast := css_ast.AST{} + if len(c.options.CSSBanner) > 0 { + j.AddString(c.options.CSSBanner) + j.AddString("\n") + } - // "@charset" is the only thing that comes before "@import" - for _, compileResult := range compileResults { - if compileResult.hasCharset { - ast.Rules = append(ast.Rules, &css_ast.RAtCharset{Encoding: "UTF-8"}) - break - } - } + // Generate any prefix rules now + { + ast := css_ast.AST{} - // Insert all external "@import" rules at the front. In CSS, all "@import" - // rules must come first or the browser will just ignore them. 
- for _, compileResult := range compileResults { - for _, record := range compileResult.externalImportRecords { - ast.Rules = append(ast.Rules, &css_ast.RAtImport{ImportRecordIndex: uint32(len(ast.ImportRecords))}) - ast.ImportRecords = append(ast.ImportRecords, record) - } - } - - if len(ast.Rules) > 0 { - css := css_printer.Print(ast, css_printer.Options{ - RemoveWhitespace: c.options.RemoveWhitespace, - }) - if len(css) > 0 { - j.AddString(css) - newlineBeforeComment = true - } - } - } - - // Start the metadata - jMeta := js_printer.Joiner{} - if c.options.AbsMetadataFile != "" { - isFirstMeta := true - jMeta.AddString("{\n \"imports\": [") - for i, importAbsPath := range continueData.crossChunkAbsPaths { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") - } - jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", - js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: importAbsPath, Namespace: "file"}), c.options.ASCIIOnly), - js_printer.QuoteForJSON(continueData.crossChunkImportRecords[i].Kind.StringForMetafile(), c.options.ASCIIOnly))) - } - if !isFirstMeta { - jMeta.AddString("\n ") - } - jMeta.AddString("],\n \"inputs\": {") - } - isFirstMeta := true - - // Concatenate the generated CSS chunks together + // "@charset" is the only thing that comes before "@import" for _, compileResult := range compileResults { - if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace { - if newlineBeforeComment { - j.AddString("\n") - } - j.AddString(fmt.Sprintf("/* %s */\n", c.files[compileResult.sourceIndex].source.PrettyPath)) + if compileResult.hasCharset { + ast.Rules = append(ast.Rules, &css_ast.RAtCharset{Encoding: "UTF-8"}) + break } - if len(compileResult.printedCSS) > 0 { + } + + // Insert all external "@import" rules at the front. In CSS, all "@import" + // rules must come first or the browser will just ignore them. 
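+		// For example, an external `@import url("http://example.com/x.css");`
+		// found in the middle of any input file in this chunk is hoisted up
+		// here, ahead of the chunk's first rule.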
+ for _, compileResult := range compileResults { + for _, external := range compileResult.externalImports { + ast.Rules = append(ast.Rules, &css_ast.RAtImport{ + ImportRecordIndex: uint32(len(ast.ImportRecords)), + ImportConditions: external.conditions, + }) + ast.ImportRecords = append(ast.ImportRecords, external.record) + } + } + + if len(ast.Rules) > 0 { + css := css_printer.Print(ast, css_printer.Options{ + RemoveWhitespace: c.options.RemoveWhitespace, + }) + if len(css) > 0 { + j.AddString(css) newlineBeforeComment = true } - j.AddString(compileResult.printedCSS) + } + } - // Include this file in the metadata - if c.options.AbsMetadataFile != "" { - if isFirstMeta { - isFirstMeta = false - } else { - jMeta.AddString(",") - } - jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", - js_printer.QuoteForJSON(c.files[compileResult.sourceIndex].source.PrettyPath, c.options.ASCIIOnly), - len(compileResult.printedCSS))) + // Start the metadata + jMeta := helpers.Joiner{} + if c.options.NeedsMetafile { + isFirstMeta := true + jMeta.AddString("{\n \"imports\": [") + for _, chunkImport := range chunk.crossChunkImports { + if isFirstMeta { + isFirstMeta = false + } else { + jMeta.AddString(",") } + jMeta.AddString(fmt.Sprintf("\n {\n \"path\": %s,\n \"kind\": %s\n }", + js_printer.QuoteForJSON(c.res.PrettyPath(logger.Path{Text: chunks[chunkImport.chunkIndex].uniqueKey, Namespace: "file"}), c.options.ASCIIOnly), + js_printer.QuoteForJSON(chunkImport.importKind.StringForMetafile(), c.options.ASCIIOnly))) } - - // Make sure the file ends with a newline - if j.Length() > 0 && j.LastByte() != '\n' { - j.AddString("\n") + if !isFirstMeta { + jMeta.AddString("\n ") } + if chunk.isEntryPoint { + file := &c.graph.Files[chunk.sourceIndex] - // The CSS contents are done now that the source map comment is in - cssContents := j.Done() - - // Figure out the base name for this chunk now that the content hash is known - if chunk.baseNameOrEmpty == "" { - hash := hashForFileName(cssContents) - chunk.baseNameOrEmpty = "chunk." + hash + c.options.OutputExtensionCSS + // Do not generate "entryPoint" for CSS files that are the result of + // importing CSS into JavaScript. We want this to be a 1:1 relationship + // and there is already an output file for the JavaScript entry point. 
+ if _, ok := file.InputFile.Repr.(*graph.CSSRepr); ok { + jMeta.AddString(fmt.Sprintf("],\n \"entryPoint\": %s,\n \"inputs\": {", + js_printer.QuoteForJSON(file.InputFile.Source.PrettyPath, c.options.ASCIIOnly))) + } else { + jMeta.AddString("],\n \"inputs\": {") + } + } else { + jMeta.AddString("],\n \"inputs\": {") } + } + isFirstMeta := true - // End the metadata - var jsonMetadataChunk []byte - if c.options.AbsMetadataFile != "" { + // Concatenate the generated CSS chunks together + for _, compileResult := range compileResults { + if c.options.Mode == config.ModeBundle && !c.options.RemoveWhitespace { + if newlineBeforeComment { + j.AddString("\n") + } + j.AddString(fmt.Sprintf("/* %s */\n", c.graph.Files[compileResult.sourceIndex].InputFile.Source.PrettyPath)) + } + if len(compileResult.printedCSS) > 0 { + newlineBeforeComment = true + } + j.AddString(compileResult.printedCSS) + + // Include this file in the metadata + if c.options.NeedsMetafile { + if isFirstMeta { + isFirstMeta = false + } else { + jMeta.AddString(",") + } + jMeta.AddString(fmt.Sprintf("\n %s: {\n \"bytesInOutput\": %d\n }", + js_printer.QuoteForJSON(c.graph.Files[compileResult.sourceIndex].InputFile.Source.PrettyPath, c.options.ASCIIOnly), + len(compileResult.printedCSS))) + } + } + + // Make sure the file ends with a newline + j.EnsureNewlineAtEnd() + + if len(c.options.CSSFooter) > 0 { + j.AddString(c.options.CSSFooter) + j.AddString("\n") + } + + // The CSS contents are done now that the source map comment is in + cssContents := j.Done() + + // End the metadata lazily. The final output size is not known until the + // final import paths are substituted into the output pieces generated below. + if c.options.NeedsMetafile { + chunk.jsonMetadataChunkCallback = func(finalOutputSize int) []byte { if !isFirstMeta { jMeta.AddString("\n ") } - jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", len(cssContents))) - jsonMetadataChunk = jMeta.Done() + jMeta.AddString(fmt.Sprintf("},\n \"bytes\": %d\n }", finalOutputSize)) + return jMeta.Done() + } + } + + chunk.outputPieces = c.breakOutputIntoPieces(cssContents, uint32(len(chunks))) + c.generateIsolatedHashInParallel(chunk) + chunkWaitGroup.Done() +} + +func appendIsolatedHashesForImportedChunks( + hash hash.Hash, + chunks []chunkInfo, + chunkIndex uint32, + visited []uint32, + visitedKey uint32, +) { + // Only visit each chunk at most once. This is important because there may be + // cycles in the chunk import graph. If there's a cycle, we want to include + // the hash of every chunk involved in the cycle (along with all of their + // dependencies). This depth-first traversal will naturally do that. 
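+	// The "visited" slice is shared between traversals: instead of being
+	// cleared each time, entries are stamped with the caller's "visitedKey",
+	// so a chunk reads as visited only within the current traversal.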
+ if visited[chunkIndex] == visitedKey { + return + } + visited[chunkIndex] = visitedKey + chunk := &chunks[chunkIndex] + + // Visit the other chunks that this chunk imports before visiting this chunk + for _, chunkImport := range chunk.crossChunkImports { + appendIsolatedHashesForImportedChunks(hash, chunks, chunkImport.chunkIndex, visited, visitedKey) + } + + // Mix in the hash for this chunk + hash.Write(chunk.waitForIsolatedHash()) +} + +func (c *linkerContext) breakOutputIntoPieces(output []byte, chunkCount uint32) []outputPiece { + var pieces []outputPiece + prefix := c.uniqueKeyPrefixBytes + for { + // Scan for the next chunk path + boundary := bytes.Index(output, prefix) + + // Try to parse the chunk index + var chunkIndex uint32 + if boundary != -1 { + if start := boundary + len(prefix); start+8 > len(output) { + boundary = -1 + } else { + for j := 0; j < 8; j++ { + c := output[start+j] + if c < '0' || c > '9' { + boundary = -1 + break + } + chunkIndex = chunkIndex*10 + uint32(c) - '0' + } + } + if chunkIndex >= chunkCount { + boundary = -1 + } } - results = append(results, OutputFile{ - AbsPath: c.fs.Join(c.options.AbsOutputDir, chunk.relPath()), - Contents: cssContents, - jsonMetadataChunk: jsonMetadataChunk, + // If we're at the end, generate one final piece + if boundary == -1 { + pieces = append(pieces, outputPiece{ + data: output, + }) + break + } + + // Otherwise, generate an interior piece and continue + pieces = append(pieces, outputPiece{ + data: output[:boundary], + chunkIndex: ast.MakeIndex32(chunkIndex), }) - return results + output = output[boundary+len(prefix)+8:] } + return pieces } -func (offset *lineColumnOffset) advanceBytes(bytes []byte) { - for i, n := 0, len(bytes); i < n; i++ { - if bytes[i] == '\n' { - offset.lines++ - offset.columns = 0 - } else { - offset.columns++ - } +func (c *linkerContext) generateIsolatedHashInParallel(chunk *chunkInfo) { + // Compute the hash in parallel. This is a speedup when it turns out the hash + // isn't needed (well, as long as there are threads to spare). + channel := make(chan []byte, 1) + chunk.waitForIsolatedHash = func() []byte { + data := <-channel + channel <- data + return data } + go c.generateIsolatedHash(chunk, channel) } -func (offset *lineColumnOffset) advanceString(text string) { - for i, n := 0, len(text); i < n; i++ { - if text[i] == '\n' { - offset.lines++ - offset.columns = 0 +func (c *linkerContext) generateIsolatedHash(chunk *chunkInfo, channel chan []byte) { + hash := xxhash.New() + + // Mix the file names and part ranges of all of the files in this chunk into + // the hash. Objects that appear identical but that live in separate files or + // that live in separate parts in the same file must not be merged. This only + // needs to be done for JavaScript files, not CSS files. + for _, partRange := range chunk.partsInChunkInOrder { + var filePath string + file := &c.graph.Files[partRange.sourceIndex] + + if file.InputFile.Source.KeyPath.Namespace == "file" { + // Use the pretty path as the file name since it should be platform- + // independent (relative paths and the "/" path separator) + filePath = file.InputFile.Source.PrettyPath } else { - offset.columns++ + // If this isn't in the "file" namespace, just use the full path text + // verbatim. This could be a source of cross-platform differences if + // plugins are storing platform-specific information in here, but then + // that problem isn't caused by esbuild itself. 
+ filePath = file.InputFile.Source.KeyPath.Text } + + // Include the path namespace in the hash + hashWriteLengthPrefixed(hash, []byte(file.InputFile.Source.KeyPath.Namespace)) + + // Then include the file path + hashWriteLengthPrefixed(hash, []byte(filePath)) + + // Also write the part range. These numbers are deterministic and allocated + // per-file so this should be a well-behaved base for a hash. + hashWriteUint32(hash, partRange.partIndexBegin) + hashWriteUint32(hash, partRange.partIndexEnd) } + + // Hash the output path template as part of the content hash because we want + // any import to be considered different if the import's output path has changed. + for _, part := range chunk.finalTemplate { + hashWriteLengthPrefixed(hash, []byte(part.Data)) + } + + // Include the generated output content in the hash. This excludes the + // randomly-generated import paths (the unique keys) and only includes the + // data in the spans between them. + for _, piece := range chunk.outputPieces { + hashWriteLengthPrefixed(hash, piece.data) + } + + // Also include the source map data in the hash. The source map is named the + // same name as the chunk name for ease of discovery. So we want the hash to + // change if the source map data changes even if the chunk data doesn't change. + // Otherwise the output path for the source map wouldn't change and the source + // map wouldn't end up being updated. + // + // Note that this means the contents of all input files are included in the + // hash because of "sourcesContent", so changing a comment in an input file + // can now change the hash of the output file. This only happens when you + // have source maps enabled (and "sourcesContent", which is on by default). + // + // The generated positions in the mappings here are in the output content + // *before* the final paths have been substituted. This may seem weird. + // However, I think this shouldn't cause issues because a) the unique key + // values are all always the same length so the offsets are deterministic + // and b) the final paths will be folded into the final hash later. + hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Prefix) + hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Mappings) + hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Suffix) + + // Store the hash so far. All other chunks that import this chunk will mix + // this hash into their final hash to ensure that the import path changes + // if this chunk (or any dependencies of this chunk) is changed. + channel <- hash.Sum(nil) +} + +func hashWriteUint32(hash hash.Hash, value uint32) { + var lengthBytes [4]byte + binary.LittleEndian.PutUint32(lengthBytes[:], value) + hash.Write(lengthBytes[:]) +} + +// Hash the data in length-prefixed form because boundary locations are +// important. We don't want "a" + "bc" to hash the same as "ab" + "c". +func hashWriteLengthPrefixed(hash hash.Hash, bytes []byte) { + hashWriteUint32(hash, uint32(len(bytes))) + hash.Write(bytes) } func preventBindingsFromBeingRenamed(binding js_ast.Binding, symbols js_ast.SymbolMap) { @@ -4263,13 +4736,13 @@ func preventBindingsFromBeingRenamed(binding js_ast.Binding, symbols js_ast.Symb // This is only used when a module is compiled independently. We use a very // different way of handling exports and renaming/minifying when bundling. 
func (c *linkerContext) preventExportsFromBeingRenamed(sourceIndex uint32) { - repr, ok := c.files[sourceIndex].repr.(*reprJS) + repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr) if !ok { return } hasImportOrExport := false - for _, part := range repr.ast.Parts { + for _, part := range repr.AST.Parts { for _, stmt := range part.Stmts { switch s := stmt.Data.(type) { case *js_ast.SImport: @@ -4277,8 +4750,8 @@ func (c *linkerContext) preventExportsFromBeingRenamed(sourceIndex uint32) { // automatically and aren't part of the original source code. We // shouldn't consider the file a module if the only ES6 import or // export is the automatically generated one. - record := &repr.ast.ImportRecords[s.ImportRecordIndex] - if record.SourceIndex != nil && *record.SourceIndex == runtime.SourceIndex { + record := &repr.AST.ImportRecords[s.ImportRecordIndex] + if record.SourceIndex.IsValid() && record.SourceIndex.GetIndex() == runtime.SourceIndex { continue } @@ -4287,20 +4760,20 @@ func (c *linkerContext) preventExportsFromBeingRenamed(sourceIndex uint32) { case *js_ast.SLocal: if s.IsExport { for _, decl := range s.Decls { - preventBindingsFromBeingRenamed(decl.Binding, c.symbols) + preventBindingsFromBeingRenamed(decl.Binding, c.graph.Symbols) } hasImportOrExport = true } case *js_ast.SFunction: if s.IsExport { - c.symbols.Get(s.Fn.Name.Ref).Kind = js_ast.SymbolUnbound + c.graph.Symbols.Get(s.Fn.Name.Ref).Kind = js_ast.SymbolUnbound hasImportOrExport = true } case *js_ast.SClass: if s.IsExport { - c.symbols.Get(s.Class.Name.Ref).Kind = js_ast.SymbolUnbound + c.graph.Symbols.Get(s.Class.Name.Ref).Kind = js_ast.SymbolUnbound hasImportOrExport = true } @@ -4322,8 +4795,8 @@ func (c *linkerContext) preventExportsFromBeingRenamed(sourceIndex uint32) { // since they are all potentially exported (e.g. if this is used in a // + +{{ else if hasPrefix . "UA-" }} -{{ end }} {{- end -}} +{{- end }}{{ end -}} + {{- define "__ga_js_set_doNotTrack" -}}{{/* This is also used in the async version. */}} {{- $pc := .Site.Config.Privacy.GoogleAnalytics -}} {{- if not $pc.RespectDoNotTrack -}} diff --git a/vendor/github.com/gohugoio/hugo/tpl/tplimpl/shortcodes.go b/vendor/github.com/gohugoio/hugo/tpl/tplimpl/shortcodes.go index 789bc07..938fc74 100644 --- a/vendor/github.com/gohugoio/hugo/tpl/tplimpl/shortcodes.go +++ b/vendor/github.com/gohugoio/hugo/tpl/tplimpl/shortcodes.go @@ -56,7 +56,7 @@ func (s *shortcodeTemplates) fromVariants(variants tpl.TemplateVariants) (shortc return s.fromVariantsSlice([]string{ variants.Language, strings.ToLower(variants.OutputFormat.Name), - variants.OutputFormat.MediaType.Suffix(), + variants.OutputFormat.MediaType.FirstSuffix.Suffix, }) } diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go index 1e7ff64..066b432 100644 --- a/vendor/github.com/golang/protobuf/proto/registry.go +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -13,6 +13,7 @@ import ( "strings" "sync" + "google.golang.org/protobuf/reflect/protodesc" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoimpl" @@ -62,14 +63,7 @@ func FileDescriptor(s filePath) fileDescGZIP { // Find the descriptor in the v2 registry. 
var b []byte if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { - b = fd.ProtoLegacyRawDesc() - } else { - // TODO: Use protodesc.ToFileDescriptorProto to construct - // a descriptorpb.FileDescriptorProto and marshal it. - // However, doing so causes the proto package to have a dependency - // on descriptorpb, leading to cyclic dependency issues. - } + b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) } // Locally cache the raw descriptor form for the file. diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index e729dcf..85f9f57 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -19,6 +19,8 @@ const urlPrefix = "type.googleapis.com/" // AnyMessageName returns the message name contained in an anypb.Any message. // Most type assertions should use the Is function instead. +// +// Deprecated: Call the any.MessageName method instead. func AnyMessageName(any *anypb.Any) (string, error) { name, err := anyMessageName(any) return string(name), err @@ -38,6 +40,8 @@ func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { } // MarshalAny marshals the given message m into an anypb.Any message. +// +// Deprecated: Call the anypb.New function instead. func MarshalAny(m proto.Message) (*anypb.Any, error) { switch dm := m.(type) { case DynamicAny: @@ -58,6 +62,9 @@ func MarshalAny(m proto.Message) (*anypb.Any, error) { // Empty returns a new message of the type specified in an anypb.Any message. // It returns protoregistry.NotFound if the corresponding message type could not // be resolved in the global registry. +// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead +// to resolve the message name and create a new instance of it. func Empty(any *anypb.Any) (proto.Message, error) { name, err := anyMessageName(any) if err != nil { @@ -76,6 +83,8 @@ func Empty(any *anypb.Any) (proto.Message, error) { // // The target message m may be a *DynamicAny message. If the underlying message // type could not be resolved, then this returns protoregistry.NotFound. +// +// Deprecated: Call the any.UnmarshalTo method instead. func UnmarshalAny(any *anypb.Any, m proto.Message) error { if dm, ok := m.(*DynamicAny); ok { if dm.Message == nil { @@ -100,6 +109,8 @@ func UnmarshalAny(any *anypb.Any, m proto.Message) error { } // Is reports whether the Any message contains a message of the specified type. +// +// Deprecated: Call the any.MessageIs method instead. func Is(any *anypb.Any, m proto.Message) bool { if any == nil || m == nil { return false @@ -119,6 +130,9 @@ func Is(any *anypb.Any, m proto.Message) bool { // var x ptypes.DynamicAny // if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } // fmt.Printf("unmarshaled message: %v", x.Message) +// +// Deprecated: Use the any.UnmarshalNew method instead to unmarshal +// the any message contents into a new instance of the underlying message. type DynamicAny struct{ proto.Message } func (m DynamicAny) String() string { diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index fb9edd5..d3c3325 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -3,4 +3,8 @@ // license that can be found in the LICENSE file. // Package ptypes provides functionality for interacting with well-known types. 
+// +// Deprecated: Well-known types have specialized functionality directly +// injected into the generated packages for each message type. +// See the deprecation notice for each function for the suggested alternative. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 6110ae8..b2b55dd 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -21,6 +21,8 @@ const ( // Duration converts a durationpb.Duration to a time.Duration. // Duration returns an error if dur is invalid or overflows a time.Duration. +// +// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. func Duration(dur *durationpb.Duration) (time.Duration, error) { if err := validateDuration(dur); err != nil { return 0, err @@ -39,6 +41,8 @@ func Duration(dur *durationpb.Duration) (time.Duration, error) { } // DurationProto converts a time.Duration to a durationpb.Duration. +// +// Deprecated: Call the durationpb.New function instead. func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 026d0d4..8368a3f 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -33,6 +33,8 @@ const ( // // A nil Timestamp returns an error. The first return value in that case is // undefined. +// +// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because it corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. @@ -46,6 +48,8 @@ func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. +// +// Deprecated: Call the timestamppb.Now function instead. func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { @@ -56,6 +60,8 @@ func TimestampNow() *timestamppb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. +// +// Deprecated: Call the timestamppb.New function instead. func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { ts := &timestamppb.Timestamp{ Seconds: t.Unix(), @@ -69,6 +75,9 @@ func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { // TimestampString returns the RFC 3339 string for valid Timestamps. // For invalid Timestamps, it returns an error message in parentheses. +// +// Deprecated: Call the ts.AsTime method instead, +// followed by a call to the Format method on the time.Time value. func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index a6c070c..104bb30 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -79,7 +79,7 @@ func (opts formatOptions) verbosity() uint { } } -const maxVerbosityPreset = 3 +const maxVerbosityPreset = 6 // verbosityPreset modifies the verbosity settings given an index // between 0 and maxVerbosityPreset, inclusive.
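The ptypes deprecation notices above each name a drop-in replacement on the generated well-known-type packages. A minimal migration sketch, assuming the google.golang.org/protobuf/types/known/timestamppb and durationpb packages that the notices point to (the caller code is hypothetical, not part of this patch):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Was: ptypes.TimestampProto(t) and ptypes.Timestamp(ts).
	ts := timestamppb.New(time.Now())
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339)) // replaces ptypes.TimestampString(ts)

	// Was: ptypes.DurationProto(d) and ptypes.Duration(dur).
	dur := durationpb.New(90 * time.Second)
	fmt.Println(dur.AsDuration()) // 1m30s
}
```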
@@ -100,7 +100,7 @@ func verbosityPreset(opts formatOptions, i int) formatOptions { func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { if opts.DiffMode == diffIdentical { opts = opts.WithVerbosity(1) - } else { + } else if opts.verbosity() < 3 { opts = opts.WithVerbosity(3) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index da04caf..168f92f 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -26,8 +26,6 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // No differences detected case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid - case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): - return false // Both slice values have to be non-empty case v.NumIgnored > 0: return false // Some ignore option was used case v.NumTransformed > 0: @@ -45,7 +43,16 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } - switch t := v.Type; t.Kind() { + // Check whether this is an interface with the same concrete types. + t := v.Type + vx, vy := v.ValueX, v.ValueY + if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + } + + // Check whether we provide specialized diffing for this type. + switch t.Kind() { case reflect.String: case reflect.Array, reflect.Slice: // Only slices of primitive types have specialized handling. @@ -57,6 +64,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false } + // Both slice values have to be non-empty. + if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { + return false + } + // If a sufficient number of elements already differ, // use specialized formatting even if length requirement is not met. if v.NumDiff > v.NumSame { @@ -68,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { // Use specialized string diffing for longer slices or strings. const minLength = 64 - return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength + return vx.Len() >= minLength && vy.Len() >= minLength } // FormatDiffSlice prints a diff for the slices (or strings) represented by v. @@ -77,6 +89,11 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { assert(opts.DiffMode == diffUnknown) t, vx, vy := v.Type, v.ValueX, v.ValueY + if t.Kind() == reflect.Interface { + vx, vy = vx.Elem(), vy.Elem() + t = vx.Type() + opts = opts.WithTypeMode(emitType) + } // Auto-detect the type of the data. var isLinedText, isText, isBinary bool diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b174616..b404f4b 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -26,8 +26,8 @@ var ( // NewMD5 and NewSHA1. 
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { h.Reset() - h.Write(space[:]) - h.Write(data) + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck s := h.Sum(nil) var uuid UUID copy(uuid[:], s) diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go index f326b54..2e02ec0 100644 --- a/vendor/github.com/google/uuid/sql.go +++ b/vendor/github.com/google/uuid/sql.go @@ -9,7 +9,7 @@ import ( "fmt" ) -// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. // Currently, database types that map to string and []byte are supported. Please // consult database-specific driver documentation for matching types. func (uuid *UUID) Scan(src interface{}) error { diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index 524404c..60d26bb 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -35,6 +35,12 @@ const ( var rander = rand.Reader // random function +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + // Parse decodes s into a UUID or returns an error. Both the standard UUID // forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the @@ -68,7 +74,7 @@ func Parse(s string) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + return uuid, invalidLengthError{len(s)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx @@ -112,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { } return uuid, nil default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + return uuid, invalidLengthError{len(b)} } // s is now at least 36 bytes long // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index c110465..86160fb 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -14,6 +14,14 @@ func New() UUID { return Must(NewRandom()) } +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + // NewRandom returns a Random (Version 4) UUID. // // The strength of the UUIDs is based on the strength of the crypto/rand diff --git a/vendor/github.com/google/wire/README.md b/vendor/github.com/google/wire/README.md index d432b63..10983be 100644 --- a/vendor/github.com/google/wire/README.md +++ b/vendor/github.com/google/wire/README.md @@ -52,7 +52,7 @@ reports and fixes. ## Community -You can contact us on the [go-cloud mailing list][]. +For questions, please use [GitHub Discussions](https://github.com/google/wire/discussions). This project is covered by the Go [Code of Conduct][]. 
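The uuid hunks above add a NewString convenience wrapper and replace the fmt.Errorf length error with a typed invalidLengthError. A short sketch of what a caller sees, assuming only the exported API shown in the diff:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// NewString is shorthand for uuid.New().String() and, like New,
	// panics only if crypto/rand fails.
	id := uuid.NewString()
	fmt.Println(id)

	// invalidLengthError is unexported, so callers still match on the
	// message; its text is unchanged from the old fmt.Errorf version.
	if _, err := uuid.Parse("not-a-uuid"); err != nil {
		fmt.Println(err) // invalid UUID length: 10
	}
}
```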
diff --git a/vendor/github.com/google/wire/go.sum b/vendor/github.com/google/wire/go.sum index 88ea58c..132636d 100644 --- a/vendor/github.com/google/wire/go.sum +++ b/vendor/github.com/google/wire/go.sum @@ -7,6 +7,9 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b h1:NVD8gBK33xpdqCaZVVtd6OFJp+3dxkXuz7+U7KaVN6s= golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= diff --git a/vendor/github.com/googleapis/gax-go/.gitignore b/vendor/github.com/googleapis/gax-go/.gitignore deleted file mode 100644 index 289bf1e..0000000 --- a/vendor/github.com/googleapis/gax-go/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.cover diff --git a/vendor/github.com/googleapis/gax-go/.travis.yml b/vendor/github.com/googleapis/gax-go/.travis.yml deleted file mode 100644 index cc0a91e..0000000 --- a/vendor/github.com/googleapis/gax-go/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false -language: go -go: - - 1.9.x - - 1.10.x - - 1.11.x -script: - - gofmt -l . - - go tool vet . - - go test -coverprofile=coverage.txt -covermode=atomic -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md b/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md deleted file mode 100644 index 46b2a08..0000000 --- a/vendor/github.com/googleapis/gax-go/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. 
-Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) diff --git a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md b/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md deleted file mode 100644 index 2827b7d..0000000 --- a/vendor/github.com/googleapis/gax-go/CONTRIBUTING.md +++ /dev/null @@ -1,27 +0,0 @@ -Want to contribute? Great! First, read this page (including the small print at the end). - -### Before you contribute -Before we can use your code, you must sign the -[Google Individual Contributor License Agreement] -(https://cla.developers.google.com/about/google-individual) -(CLA), which you can do online. The CLA is necessary mainly because you own the -copyright to your changes, even after your contribution becomes part of our -codebase, so we need your permission to use and distribute your code. We also -need to be sure of various other things—for instance that you'll tell us if you -know that your code infringes on other people's patents. You don't have to sign -the CLA until after you've submitted your code for review and a member has -approved it, but you must do it before we can put your code into our codebase. -Before you start working on a larger contribution, you should get in touch with -us first through the issue tracker with your idea so that we can help out and -possibly guide you. Coordinating up front makes it much easier to avoid -frustration later on. - -### Code reviews -All submissions, including submissions by project members, require review. We -use Github pull requests for this purpose. - -### The small print -Contributions made by corporations are covered by a different agreement than -the one above, the -[Software Grant and Corporate Contributor License Agreement] -(https://cla.developers.google.com/about/google-corporate). diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE deleted file mode 100644 index 6d16b65..0000000 --- a/vendor/github.com/googleapis/gax-go/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright 2016, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/README.md b/vendor/github.com/googleapis/gax-go/README.md deleted file mode 100644 index d6e214e..0000000 --- a/vendor/github.com/googleapis/gax-go/README.md +++ /dev/null @@ -1,29 +0,0 @@ -Google API Extensions for Go -============================ - -[![Build Status](https://travis-ci.org/googleapis/gax-go.svg?branch=master)](https://travis-ci.org/googleapis/gax-go) -[![Code Coverage](https://img.shields.io/codecov/c/github/googleapis/gax-go.svg)](https://codecov.io/github/googleapis/gax-go) -[![GoDoc](https://godoc.org/github.com/googleapis/gax-go?status.svg)](https://godoc.org/github.com/googleapis/gax-go) - -Google API Extensions for Go (gax-go) is a set of modules which aids the -development of APIs for clients and servers based on `gRPC` and Google API -conventions. - -To install the API extensions, use: - -``` -go get -u github.com/googleapis/gax-go -``` - -**Note:** Application code will rarely need to use this library directly, -but the code generated automatically from API definition files can use it -to simplify code generation and to provide more convenient and idiomatic API surface. - -Go Versions -=========== -This library requires Go 1.6 or above. - -License -======= -BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE) -for more information. diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go deleted file mode 100644 index 7b62164..0000000 --- a/vendor/github.com/googleapis/gax-go/call_option.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "math/rand" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// CallOption is an option used by Invoke to control behaviors of RPC calls. -// CallOption works by modifying relevant fields of CallSettings. -type CallOption interface { - // Resolve applies the option by modifying cs. - Resolve(cs *CallSettings) -} - -// Retryer is used by Invoke to determine retry behavior. -type Retryer interface { - // Retry reports whether a request should be retried and how long to pause before retrying - // if the previous attempt returned with err. Invoke never calls Retry with nil error. - Retry(err error) (pause time.Duration, shouldRetry bool) -} - -type retryerOption func() Retryer - -func (o retryerOption) Resolve(s *CallSettings) { - s.Retry = o -} - -// WithRetry sets CallSettings.Retry to fn. -func WithRetry(fn func() Retryer) CallOption { - return retryerOption(fn) -} - -// OnCodes returns a Retryer that retries if and only if -// the previous attempt returns a GRPC error whose error code is stored in cc. -// Pause times between retries are specified by bo. -// -// bo is only used for its parameters; each Retryer has its own copy. -func OnCodes(cc []codes.Code, bo Backoff) Retryer { - return &boRetryer{ - backoff: bo, - codes: append([]codes.Code(nil), cc...), - } -} - -type boRetryer struct { - backoff Backoff - codes []codes.Code -} - -func (r *boRetryer) Retry(err error) (time.Duration, bool) { - st, ok := status.FromError(err) - if !ok { - return 0, false - } - c := st.Code() - for _, rc := range r.codes { - if c == rc { - return r.backoff.Pause(), true - } - } - return 0, false -} - -// Backoff implements exponential backoff. -// The wait time between retries is a random value between 0 and the "retry envelope". -// The envelope starts at Initial and increases by the factor of Multiplier every retry, -// but is capped at Max. -type Backoff struct { - // Initial is the initial value of the retry envelope, defaults to 1 second. - Initial time.Duration - - // Max is the maximum value of the retry envelope, defaults to 30 seconds. - Max time.Duration - - // Multiplier is the factor by which the retry envelope increases. - // It should be greater than 1 and defaults to 2. - Multiplier float64 - - // cur is the current retry envelope - cur time.Duration -} - -func (bo *Backoff) Pause() time.Duration { - if bo.Initial == 0 { - bo.Initial = time.Second - } - if bo.cur == 0 { - bo.cur = bo.Initial - } - if bo.Max == 0 { - bo.Max = 30 * time.Second - } - if bo.Multiplier < 1 { - bo.Multiplier = 2 - } - // Select a duration between zero and the current max. It might seem counterintuitive to - // have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html - // argues that that is the best strategy.
- d := time.Duration(rand.Int63n(int64(bo.cur))) - bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) - if bo.cur > bo.Max { - bo.cur = bo.Max - } - return d -} - -type grpcOpt []grpc.CallOption - -func (o grpcOpt) Resolve(s *CallSettings) { - s.GRPC = o -} - -func WithGRPCOptions(opt ...grpc.CallOption) CallOption { - return grpcOpt(append([]grpc.CallOption(nil), opt...)) -} - -type CallSettings struct { - // Retry returns a Retryer to be used to control retry logic of a method call. - // If Retry is nil or the returned Retryer is nil, the call will not be retried. - Retry func() Retryer - - // CallOptions to be forwarded to GRPC. - GRPC []grpc.CallOption -} diff --git a/vendor/github.com/googleapis/gax-go/gax.go b/vendor/github.com/googleapis/gax-go/gax.go deleted file mode 100644 index 8b2900e..0000000 --- a/vendor/github.com/googleapis/gax-go/gax.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package gax contains a set of modules which aid the development of APIs -// for clients and servers based on gRPC and Google API conventions. -// -// Application code will rarely need to use this library directly. -// However, code generated automatically from API definition files can use it -// to simplify code generation and to provide more convenient and idiomatic API surfaces. -package gax - -const Version = "2.0.0" diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go deleted file mode 100644 index d81455e..0000000 --- a/vendor/github.com/googleapis/gax-go/header.go +++ /dev/null @@ -1,24 +0,0 @@ -package gax - -import "bytes" - -// XGoogHeader is for use by the Google Cloud Libraries only. -// -// XGoogHeader formats key-value pairs. -// The resulting string is suitable for x-goog-api-client header. 
-func XGoogHeader(keyval ...string) string { - if len(keyval) == 0 { - return "" - } - if len(keyval)%2 != 0 { - panic("gax.Header: odd argument count") - } - var buf bytes.Buffer - for i := 0; i < len(keyval); i += 2 { - buf.WriteByte(' ') - buf.WriteString(keyval[i]) - buf.WriteByte('/') - buf.WriteString(keyval[i+1]) - } - return buf.String()[1:] -} diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go deleted file mode 100644 index cb5cd2a..0000000 --- a/vendor/github.com/googleapis/gax-go/invoke.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2016, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gax - -import ( - "context" - "time" -) - -// A user defined call stub. -type APICall func(context.Context, CallSettings) error - -// Invoke calls the given APICall, -// performing retries as specified by opts, if any. -func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { - var settings CallSettings - for _, opt := range opts { - opt.Resolve(&settings) - } - return invoke(ctx, call, settings, Sleep) -} - -// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. -// If interrupted, Sleep returns ctx.Err(). -func Sleep(ctx context.Context, d time.Duration) error { - t := time.NewTimer(d) - select { - case <-ctx.Done(): - t.Stop() - return ctx.Err() - case <-t.C: - return nil - } -} - -type sleeper func(ctx context.Context, d time.Duration) error - -// invoke implements Invoke, taking an additional sleeper argument for testing. 
-func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { - var retryer Retryer - for { - err := call(ctx, settings) - if err == nil { - return nil - } - if settings.Retry == nil { - return err - } - if retryer == nil { - if r := settings.Retry(); r != nil { - retryer = r - } else { - return err - } - } - if d, ok := retryer.Retry(err); !ok { - return err - } else if err = sp(ctx, d); err != nil { - return err - } - } -} diff --git a/vendor/github.com/josharian/intern/README.md b/vendor/github.com/josharian/intern/README.md new file mode 100644 index 0000000..ffc44b2 --- /dev/null +++ b/vendor/github.com/josharian/intern/README.md @@ -0,0 +1,5 @@ +Docs: https://godoc.org/github.com/josharian/intern + +See also [Go issue 5160](https://golang.org/issue/5160). + +License: MIT diff --git a/vendor/github.com/josharian/intern/go.mod b/vendor/github.com/josharian/intern/go.mod new file mode 100644 index 0000000..f2262ff --- /dev/null +++ b/vendor/github.com/josharian/intern/go.mod @@ -0,0 +1,3 @@ +module github.com/josharian/intern + +go 1.5 diff --git a/vendor/github.com/josharian/intern/intern.go b/vendor/github.com/josharian/intern/intern.go new file mode 100644 index 0000000..7acb1fe --- /dev/null +++ b/vendor/github.com/josharian/intern/intern.go @@ -0,0 +1,44 @@ +// Package intern interns strings. +// Interning is best effort only. +// Interned strings may be removed automatically +// at any time without notification. +// All functions may be called concurrently +// with themselves and each other. +package intern + +import "sync" + +var ( + pool sync.Pool = sync.Pool{ + New: func() interface{} { + return make(map[string]string) + }, + } +) + +// String returns s, interned. +func String(s string) string { + m := pool.Get().(map[string]string) + c, ok := m[s] + if ok { + pool.Put(m) + return c + } + m[s] = s + pool.Put(m) + return s +} + +// Bytes returns b converted to a string, interned. +func Bytes(b []byte) string { + m := pool.Get().(map[string]string) + c, ok := m[string(b)] + if ok { + pool.Put(m) + return c + } + s := string(b) + m[s] = s + pool.Put(m) + return s +} diff --git a/vendor/github.com/josharian/intern/license.md b/vendor/github.com/josharian/intern/license.md new file mode 100644 index 0000000..353d305 --- /dev/null +++ b/vendor/github.com/josharian/intern/license.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Josh Bleecher Snyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
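The newly vendored intern package above trades a sync.Pool of maps for allocation savings: converting the same []byte to a string repeatedly yields one shared string instead of a fresh copy per call. A small usage sketch, assuming only the String and Bytes functions shown:

```go
package main

import (
	"fmt"

	"github.com/josharian/intern"
)

func main() {
	raw := []byte("content-type")

	// The first call stores "content-type" in the pooled map; the second
	// returns that cached string rather than allocating another copy.
	k1 := intern.Bytes(raw)
	k2 := intern.Bytes(raw)
	fmt.Println(k1 == k2) // true, and both share the same backing data
}
```

Since interning is best effort (the pooled maps can be dropped by the GC at any time), callers may rely only on value equality, never on the cache surviving.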
diff --git a/vendor/github.com/kyokomi/emoji/v2/README.md b/vendor/github.com/kyokomi/emoji/v2/README.md index 9ad97f5..e604598 100644 --- a/vendor/github.com/kyokomi/emoji/v2/README.md +++ b/vendor/github.com/kyokomi/emoji/v2/README.md @@ -3,19 +3,19 @@ Emoji is a simple golang package. [![wercker status](https://app.wercker.com/status/7bef60de2c6d3e0e6c13d56b2393c5d8/s/master "wercker status")](https://app.wercker.com/project/byKey/7bef60de2c6d3e0e6c13d56b2393c5d8) [![Coverage Status](https://coveralls.io/repos/kyokomi/emoji/badge.png?branch=master)](https://coveralls.io/r/kyokomi/emoji?branch=master) -[![GoDoc](https://godoc.org/github.com/kyokomi/emoji?status.svg)](https://godoc.org/github.com/kyokomi/emoji) +[![GoDoc](https://pkg.go.dev/badge/github.com/kyokomi/emoji.svg)](https://pkg.go.dev/github.com/kyokomi/emoji/v2) Get it: ``` -go get github.com/kyokomi/emoji +go get github.com/kyokomi/emoji/v2 ``` Import it: ``` import ( - "github.com/kyokomi/emoji" + "github.com/kyokomi/emoji/v2" ) ``` @@ -27,7 +27,7 @@ package main import ( "fmt" - "github.com/kyokomi/emoji" + "github.com/kyokomi/emoji/v2" ) func main() { diff --git a/vendor/github.com/magiconair/properties/.travis.yml b/vendor/github.com/magiconair/properties/.travis.yml index f07376f..baf9031 100644 --- a/vendor/github.com/magiconair/properties/.travis.yml +++ b/vendor/github.com/magiconair/properties/.travis.yml @@ -1,5 +1,6 @@ language: go go: + - 1.3.x - 1.4.x - 1.5.x - 1.6.x @@ -9,4 +10,8 @@ go: - "1.10.x" - "1.11.x" - "1.12.x" + - "1.13.x" + - "1.14.x" + - "1.15.x" + - "1.16.x" - tip diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md index 176626a..ff8d025 100644 --- a/vendor/github.com/magiconair/properties/CHANGELOG.md +++ b/vendor/github.com/magiconair/properties/CHANGELOG.md @@ -1,8 +1,29 @@ ## Changelog +### [1.8.2](https://github.com/magiconair/properties/tree/v1.8.2) - 25 Aug 2020 + + * [PR #36](https://github.com/magiconair/properties/pull/36): Escape backslash on write + + This patch ensures that backslashes are escaped on write. Existing applications which + rely on the old behavior may need to be updated. + + Thanks to [@apesternikov](https://github.com/apesternikov) for the patch. + + * [PR #42](https://github.com/magiconair/properties/pull/42): Made Content-Type check whitespace agnostic in LoadURL() + + Thanks to [@aliras1](https://github.com/aliras1) for the patch. + + * [PR #41](https://github.com/magiconair/properties/pull/41): Make key/value separator configurable on Write() + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + + * [PR #40](https://github.com/magiconair/properties/pull/40): Add method to return a sorted list of keys + + Thanks to [@mkjor](https://github.com/mkjor) for the patch. + ### [1.8.1](https://github.com/magiconair/properties/tree/v1.8.1) - 10 May 2019 - * [PR #26](https://github.com/magiconair/properties/pull/35): Close body always after request + * [PR #35](https://github.com/magiconair/properties/pull/35): Close body always after request This patch ensures that in `LoadURL` the response body is always closed. 
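A brief sketch of the two new 1.8.2 features called out in the changelog above, the configurable key/value separator and key sorting; the WriteSeparator field and Sort method appear in the properties.go hunks below, while the caller here is hypothetical:

```go
package main

import (
	"os"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.Set("b.key", "2")
	p.Set("a.key", "1")

	p.Sort()               // new in 1.8.2: orders keys alphabetically
	p.WriteSeparator = "=" // new in 1.8.2: default remains " = "

	// Prints:
	//   a.key=1
	//   b.key=2
	p.Write(os.Stdout, properties.UTF8)
}
```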
diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE.md similarity index 84% rename from vendor/github.com/magiconair/properties/LICENSE rename to vendor/github.com/magiconair/properties/LICENSE.md index b387087..79c87e3 100644 --- a/vendor/github.com/magiconair/properties/LICENSE +++ b/vendor/github.com/magiconair/properties/LICENSE.md @@ -1,15 +1,14 @@ -goproperties - properties file decoder for Go - -Copyright (c) 2013-2018 - Frank Schroeder +Copyright (c) 2013-2020, Frank Schroeder All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice, this + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, + + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md index 42ed5c3..e2edda0 100644 --- a/vendor/github.com/magiconair/properties/README.md +++ b/vendor/github.com/magiconair/properties/README.md @@ -1,6 +1,5 @@ [![](https://img.shields.io/github/tag/magiconair/properties.svg?style=flat-square&label=release)](https://github.com/magiconair/properties/releases) [![Travis CI Status](https://img.shields.io/travis/magiconair/properties.svg?branch=master&style=flat-square&label=travis)](https://travis-ci.org/magiconair/properties) -[![CircleCI Status](https://img.shields.io/circleci/project/github/magiconair/properties.svg?label=circle+ci&style=flat-square)](https://circleci.com/gh/magiconair/properties) [![License](https://img.shields.io/badge/License-BSD%202--Clause-orange.svg?style=flat-square)](https://raw.githubusercontent.com/magiconair/properties/master/LICENSE) [![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg?style=flat-square)](http://godoc.org/github.com/magiconair/properties) diff --git a/vendor/github.com/magiconair/properties/go.mod b/vendor/github.com/magiconair/properties/go.mod index 02a6f86..4ff090b 100644 --- a/vendor/github.com/magiconair/properties/go.mod +++ b/vendor/github.com/magiconair/properties/go.mod @@ -1 +1,3 @@ module github.com/magiconair/properties + +go 1.13 diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go index ab95325..c83c2da 100644 --- a/vendor/github.com/magiconair/properties/load.go +++ b/vendor/github.com/magiconair/properties/load.go @@ -132,11 +132,12 @@ func (l *Loader) LoadURL(url string) (*Properties, error) { } ct := resp.Header.Get("Content-Type") + ct = strings.Join(strings.Fields(ct), "") var enc Encoding switch strings.ToLower(ct) { - case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1": + case "text/plain", "text/plain;charset=iso-8859-1", "text/plain;charset=latin1": enc = ISO_8859_1 - case "", "text/plain; charset=utf-8": + case "", "text/plain;charset=utf-8": enc = UTF8 default: return nil, fmt.Errorf("properties: invalid content type %s", ct) diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go index cb3d1a3..1529e72 
100644 --- a/vendor/github.com/magiconair/properties/properties.go +++ b/vendor/github.com/magiconair/properties/properties.go @@ -8,11 +8,13 @@ package properties // BUG(frank): Write() does not allow to configure the newline character. Therefore, on Windows LF is used. import ( + "bytes" "fmt" "io" "log" "os" "regexp" + "sort" "strconv" "strings" "time" @@ -69,6 +71,9 @@ type Properties struct { // Stores the keys in order of appearance. k []string + + // WriteSeparator specifies the separator of key and value while writing the properties. + WriteSeparator string } // NewProperties creates a new Properties struct with the default @@ -111,7 +116,7 @@ func (p *Properties) Get(key string) (value string, ok bool) { // circular references and malformed expressions // so we panic if we still get an error here. if err != nil { - ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v)) + ErrorHandler(err) } return expanded, true @@ -586,6 +591,12 @@ func (p *Properties) String() string { return s } +// Sort sorts the properties keys in alphabetical order. +// This is helpful before writing the properties. +func (p *Properties) Sort() { + sort.Strings(p.k) +} + // Write writes all unexpanded 'key = value' pairs to the given writer. // Write returns the number of bytes written and any write error encountered. func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) { @@ -626,7 +637,7 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i } for _, c := range comments { - x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc)) + x, err = fmt.Fprintf(w, "%s%s\n", prefix, c) if err != nil { return } @@ -635,8 +646,11 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i } } } - - x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc)) + sep := " = " + if p.WriteSeparator != "" { + sep = p.WriteSeparator + } + x, err = fmt.Fprintf(w, "%s%s%s\n", encode(key, " :", enc), sep, encode(value, "", enc)) if err != nil { return } @@ -753,7 +767,12 @@ func expand(s string, keys []string, prefix, postfix string, values map[string]s for _, k := range keys { if key == k { - return "", fmt.Errorf("circular reference") + var b bytes.Buffer + b.WriteString("circular reference in:\n") + for _, k1 := range keys { + fmt.Fprintf(&b, "%s=%s\n", k1, values[k1]) + } + return "", fmt.Errorf(b.String()) } } @@ -820,6 +839,8 @@ func escape(r rune, special string) string { return "\\r" case '\t': return "\\t" + case '\\': + return "\\\\" default: if strings.ContainsRune(special, r) { return "\\" + string(r) diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go index 07fb4bc..598a54a 100644 --- a/vendor/github.com/mailru/easyjson/buffer/pool.go +++ b/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -4,6 +4,7 @@ package buffer import ( "io" + "net" "sync" ) @@ -52,14 +53,12 @@ func putBuf(buf []byte) { // getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
func getBuf(size int) []byte { - if size < config.PooledSize { - return make([]byte, 0, size) - } - - if c := buffers[size]; c != nil { - v := c.Get() - if v != nil { - return v.([]byte) + if size >= config.PooledSize { + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } } } return make([]byte, 0, size) @@ -78,9 +77,12 @@ type Buffer struct { // EnsureSpace makes sure that the current chunk contains at least s free bytes, // possibly creating a new chunk. func (b *Buffer) EnsureSpace(s int) { - if cap(b.Buf)-len(b.Buf) >= s { - return + if cap(b.Buf)-len(b.Buf) < s { + b.ensureSpaceSlow(s) } +} + +func (b *Buffer) ensureSpaceSlow(s int) { l := len(b.Buf) if l > 0 { if cap(b.toPool) != cap(b.Buf) { @@ -105,18 +107,22 @@ func (b *Buffer) EnsureSpace(s int) { // AppendByte appends a single byte to buffer. func (b *Buffer) AppendByte(data byte) { - if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. - b.EnsureSpace(1) - } + b.EnsureSpace(1) b.Buf = append(b.Buf, data) } // AppendBytes appends a byte slice to buffer. func (b *Buffer) AppendBytes(data []byte) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) // fast path + } else { + b.appendBytesSlow(data) + } +} + +func (b *Buffer) appendBytesSlow(data []byte) { for len(data) > 0 { - if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. - b.EnsureSpace(1) - } + b.EnsureSpace(1) sz := cap(b.Buf) - len(b.Buf) if sz > len(data) { @@ -128,12 +134,18 @@ func (b *Buffer) AppendBytes(data []byte) { } } -// AppendBytes appends a string to buffer. +// AppendString appends a string to buffer. func (b *Buffer) AppendString(data string) { + if len(data) <= cap(b.Buf)-len(b.Buf) { + b.Buf = append(b.Buf, data...) // fast path + } else { + b.appendStringSlow(data) + } +} + +func (b *Buffer) appendStringSlow(data string) { for len(data) > 0 { - if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. - b.EnsureSpace(1) - } + b.EnsureSpace(1) sz := cap(b.Buf) - len(b.Buf) if sz > len(data) { @@ -156,18 +168,14 @@ func (b *Buffer) Size() int { // DumpTo outputs the contents of a buffer to a writer and resets the buffer. func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { - var n int - for _, buf := range b.bufs { - if err == nil { - n, err = w.Write(buf) - written += n - } - putBuf(buf) + bufs := net.Buffers(b.bufs) + if len(b.Buf) > 0 { + bufs = append(bufs, b.Buf) } + n, err := bufs.WriteTo(w) - if err == nil { - n, err = w.Write(b.Buf) - written += n + for _, buf := range b.bufs { + putBuf(buf) } putBuf(b.toPool) @@ -175,7 +183,7 @@ func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { b.Buf = nil b.toPool = nil - return + return int(n), err } // BuildBytes creates a single byte slice with all the contents of the buffer. Data is @@ -192,7 +200,7 @@ func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { var ret []byte size := b.Size() - // If we got a buffer as argument and it is big enought, reuse it. + // If we got a buffer as argument and it is big enough, reuse it. 
if len(reuse) == 1 && cap(reuse[0]) >= size { ret = reuse[0][:0] } else { diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index ddd376b..b5f5e26 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -5,6 +5,7 @@ package jlexer import ( + "bytes" "encoding/base64" "encoding/json" "errors" @@ -14,6 +15,8 @@ import ( "unicode" "unicode/utf16" "unicode/utf8" + + "github.com/josharian/intern" ) // tokenKind determines type of a token. @@ -32,9 +35,10 @@ const ( type token struct { kind tokenKind // Type of a token. - boolValue bool // Value if a boolean literal token. - byteValue []byte // Raw value of a token. - delimValue byte + boolValue bool // Value if a boolean literal token. + byteValueCloned bool // true if byteValue was allocated and does not refer to original json body + byteValue []byte // Raw value of a token. + delimValue byte } // Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. @@ -240,23 +244,65 @@ func (r *Lexer) fetchNumber() { // findStringLen tries to scan into the string literal for ending quote char to determine required size. // The size will be exact if no escapes are present and may be inexact if there are escaped chars. -func findStringLen(data []byte) (isValid, hasEscapes bool, length int) { - delta := 0 - - for i := 0; i < len(data); i++ { - switch data[i] { - case '\\': - i++ - delta++ - if i < len(data) && data[i] == 'u' { - delta++ - } - case '"': - return true, (delta > 0), (i - delta) +func findStringLen(data []byte) (isValid bool, length int) { + for { + idx := bytes.IndexByte(data, '"') + if idx == -1 { + return false, len(data) } + if idx == 0 || (idx > 0 && data[idx-1] != '\\') { + return true, length + idx + } + + // count \\\\\\\ sequences. even number of slashes means quote is not really escaped + cnt := 1 + for idx-cnt-1 >= 0 && data[idx-cnt-1] == '\\' { + cnt++ + } + if cnt%2 == 0 { + return true, length + idx + } + + length += idx + 1 + data = data[idx+1:] + } +} + +// unescapeStringToken performs unescaping of string token. +// if no escaping is needed, original string is returned, otherwise - a new one allocated +func (r *Lexer) unescapeStringToken() (err error) { + data := r.token.byteValue + var unescapedData []byte + + for { + i := bytes.IndexByte(data, '\\') + if i == -1 { + break + } + + escapedRune, escapedBytes, err := decodeEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return err + } + + if unescapedData == nil { + unescapedData = make([]byte, 0, len(r.token.byteValue)) + } + + var d [4]byte + s := utf8.EncodeRune(d[:], escapedRune) + unescapedData = append(unescapedData, data[:i]...) + unescapedData = append(unescapedData, d[:s]...) + + data = data[i+escapedBytes:] } - return false, false, len(data) + if unescapedData != nil { + r.token.byteValue = append(unescapedData, data...) + r.token.byteValueCloned = true + } + return } // getu4 decodes \uXXXX from the beginning of s, returning the hex value, @@ -286,36 +332,30 @@ func getu4(s []byte) rune { return val } -// processEscape processes a single escape sequence and returns number of bytes processed. -func (r *Lexer) processEscape(data []byte) (int, error) { +// decodeEscape processes a single escape sequence and returns number of bytes processed. 
+func decodeEscape(data []byte) (decoded rune, bytesProcessed int, err error) { if len(data) < 2 { - return 0, fmt.Errorf("syntax error at %v", string(data)) + return 0, 0, errors.New("incorrect escape symbol \\ at the end of token") } c := data[1] switch c { case '"', '/', '\\': - r.token.byteValue = append(r.token.byteValue, c) - return 2, nil + return rune(c), 2, nil case 'b': - r.token.byteValue = append(r.token.byteValue, '\b') - return 2, nil + return '\b', 2, nil case 'f': - r.token.byteValue = append(r.token.byteValue, '\f') - return 2, nil + return '\f', 2, nil case 'n': - r.token.byteValue = append(r.token.byteValue, '\n') - return 2, nil + return '\n', 2, nil case 'r': - r.token.byteValue = append(r.token.byteValue, '\r') - return 2, nil + return '\r', 2, nil case 't': - r.token.byteValue = append(r.token.byteValue, '\t') - return 2, nil + return '\t', 2, nil case 'u': rr := getu4(data) if rr < 0 { - return 0, errors.New("syntax error") + return 0, 0, errors.New("incorrectly escaped \\uXXXX sequence") } read := 6 @@ -328,13 +368,10 @@ func (r *Lexer) processEscape(data []byte) (int, error) { rr = unicode.ReplacementChar } } - var d [4]byte - s := utf8.EncodeRune(d[:], rr) - r.token.byteValue = append(r.token.byteValue, d[:s]...) - return read, nil + return rr, read, nil } - return 0, errors.New("syntax error") + return 0, 0, errors.New("incorrectly escaped bytes") } // fetchString scans a string literal token. @@ -342,43 +379,14 @@ func (r *Lexer) fetchString() { r.pos++ data := r.Data[r.pos:] - isValid, hasEscapes, length := findStringLen(data) + isValid, length := findStringLen(data) if !isValid { r.pos += length r.errParse("unterminated string literal") return } - if !hasEscapes { - r.token.byteValue = data[:length] - r.pos += length + 1 - return - } - - r.token.byteValue = make([]byte, 0, length) - p := 0 - for i := 0; i < len(data); { - switch data[i] { - case '"': - r.pos += i + 1 - r.token.byteValue = append(r.token.byteValue, data[p:i]...) - i++ - return - - case '\\': - r.token.byteValue = append(r.token.byteValue, data[p:i]...) - off, err := r.processEscape(data[i:]) - if err != nil { - r.errParse(err.Error()) - return - } - i += off - p = i - - default: - i++ - } - } - r.errParse("unterminated string literal") + r.token.byteValue = data[:length] + r.pos += length + 1 // skip closing '"' as well } // scanToken scans the next token if no token is currently available in the lexer. @@ -393,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. 
func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -520,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -545,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -602,7 +620,7 @@ func (r *Lexer) Consumed() { } } -func (r *Lexer) unsafeString() (string, []byte) { +func (r *Lexer) unsafeString(skipUnescape bool) (string, []byte) { if r.token.kind == tokenUndef && r.Ok() { r.FetchToken() } @@ -610,6 +628,13 @@ func (r *Lexer) unsafeString() (string, []byte) { r.errInvalidToken("string") return "", nil } + if !skipUnescape { + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "", nil + } + } + bytes := r.token.byteValue ret := bytesToStr(r.token.byteValue) r.consume() @@ -621,13 +646,19 @@ func (r *Lexer) unsafeString() (string, []byte) { // Warning: returned string may point to the input buffer, so the string should not outlive // the input buffer. Intended pattern of usage is as an argument to a switch statement. func (r *Lexer) UnsafeString() string { - ret, _ := r.unsafeString() + ret, _ := r.unsafeString(false) return ret } // UnsafeBytes returns the byte slice if the token is a string literal. func (r *Lexer) UnsafeBytes() []byte { - _, ret := r.unsafeString() + _, ret := r.unsafeString(false) + return ret +} + +// UnsafeFieldName returns current member name string token +func (r *Lexer) UnsafeFieldName(skipUnescape bool) string { + ret, _ := r.unsafeString(skipUnescape) return ret } @@ -640,7 +671,34 @@ func (r *Lexer) String() string { r.errInvalidToken("string") return "" } - ret := string(r.token.byteValue) + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "" + } + var ret string + if r.token.byteValueCloned { + ret = bytesToStr(r.token.byteValue) + } else { + ret = string(r.token.byteValue) + } + r.consume() + return ret +} + +// StringIntern reads a string literal, and performs string interning on it. 
+func (r *Lexer) StringIntern() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return "" + } + ret := intern.Bytes(r.token.byteValue) r.consume() return ret } @@ -654,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { @@ -839,7 +901,7 @@ func (r *Lexer) Int() int { } func (r *Lexer) Uint8Str() uint8 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -856,7 +918,7 @@ func (r *Lexer) Uint8Str() uint8 { } func (r *Lexer) Uint16Str() uint16 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -873,7 +935,7 @@ func (r *Lexer) Uint16Str() uint16 { } func (r *Lexer) Uint32Str() uint32 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -890,7 +952,7 @@ func (r *Lexer) Uint32Str() uint32 { } func (r *Lexer) Uint64Str() uint64 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -915,7 +977,7 @@ func (r *Lexer) UintptrStr() uintptr { } func (r *Lexer) Int8Str() int8 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -932,7 +994,7 @@ func (r *Lexer) Int8Str() int8 { } func (r *Lexer) Int16Str() int16 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -949,7 +1011,7 @@ func (r *Lexer) Int16Str() int16 { } func (r *Lexer) Int32Str() int32 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -966,7 +1028,7 @@ func (r *Lexer) Int32Str() int32 { } func (r *Lexer) Int64Str() int64 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -1004,7 +1066,7 @@ func (r *Lexer) Float32() float32 { } func (r *Lexer) Float32Str() float32 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } @@ -1037,7 +1099,7 @@ func (r *Lexer) Float64() float64 { } func (r *Lexer) Float64Str() float64 { - s, b := r.unsafeString() + s, b := r.unsafeString(false) if !r.Ok() { return 0 } diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go index b9ed7cc..2c5b201 100644 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -270,16 +270,25 @@ func (w *Writer) Bool(v bool) { const chars = "0123456789abcdef" -func isNotEscapedSingleChar(c byte, escapeHTML bool) bool { - // Note: might make sense to use a table if there are more chars to escape. With 4 chars - // it benchmarks the same. 
- if escapeHTML { - return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf - } else { - return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf +func getTable(falseValues ...int) [128]bool { + table := [128]bool{} + + for i := 0; i < 128; i++ { + table[i] = true } + + for _, v := range falseValues { + table[v] = false + } + + return table } +var ( + htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') + htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') +) + func (w *Writer) String(s string) { w.Buffer.AppendByte('"') @@ -288,15 +297,21 @@ func (w *Writer) String(s string) { p := 0 // last non-escape symbol + escapeTable := &htmlEscapeTable + if w.NoEscapeHTML { + escapeTable = &htmlNoEscapeTable + } + for i := 0; i < len(s); { c := s[i] - if isNotEscapedSingleChar(c, !w.NoEscapeHTML) { - // single-width character, no escaping is required - i++ - continue - } else if c < utf8.RuneSelf { - // single-with character, need to escape + if c < utf8.RuneSelf { + if escapeTable[c] { + // single-width character, no escaping is required + i++ + continue + } + w.Buffer.AppendString(s[p:i]) switch c { case '\t': diff --git a/vendor/github.com/mattn/go-runewidth/go.mod b/vendor/github.com/mattn/go-runewidth/go.mod index fa7f4d8..8a9d524 100644 --- a/vendor/github.com/mattn/go-runewidth/go.mod +++ b/vendor/github.com/mattn/go-runewidth/go.mod @@ -1,3 +1,5 @@ module github.com/mattn/go-runewidth go 1.9 + +require github.com/rivo/uniseg v0.1.0 diff --git a/vendor/github.com/mattn/go-runewidth/go.sum b/vendor/github.com/mattn/go-runewidth/go.sum new file mode 100644 index 0000000..0213566 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/go.sum @@ -0,0 +1,2 @@ +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go index 19f8e04..3d7fa56 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -2,6 +2,8 @@ package runewidth import ( "os" + + "github.com/rivo/uniseg" ) //go:generate go run script/generate.go @@ -10,11 +12,14 @@ var ( // EastAsianWidth will be set true if the current locale is CJK EastAsianWidth bool - // ZeroWidthJoiner is flag to set to use UTR#51 ZWJ - ZeroWidthJoiner bool + // StrictEmojiNeutral should be set false if handle broken fonts + StrictEmojiNeutral bool = true // DefaultCondition is a condition in current locale - DefaultCondition = &Condition{} + DefaultCondition = &Condition{ + EastAsianWidth: false, + StrictEmojiNeutral: true, + } ) func init() { @@ -30,7 +35,6 @@ func handleEnv() { } // update DefaultCondition DefaultCondition.EastAsianWidth = EastAsianWidth - DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner } type interval struct { @@ -85,63 +89,69 @@ var nonprint = table{ // Condition have flag EastAsianWidth whether the current locale is CJK or not. type Condition struct { - EastAsianWidth bool - ZeroWidthJoiner bool + EastAsianWidth bool + StrictEmojiNeutral bool } // NewCondition return new instance of Condition which is current locale. 
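On the writer side, the per-byte branching is replaced by one 128-entry lookup table per mode; NoEscapeHTML selects the second table. A minimal usage sketch, with illustrative output in the comments:

package main

import (
	"fmt"

	"github.com/mailru/easyjson/jwriter"
)

func main() {
	for _, noEscapeHTML := range []bool{false, true} {
		w := jwriter.Writer{NoEscapeHTML: noEscapeHTML}
		w.String(`<b> & "quoted"`)
		buf, _ := w.BuildBytes()
		fmt.Println(string(buf))
	}
	// Expected (illustrative):
	//   "\u003cb\u003e \u0026 \"quoted\""   <- HTML-sensitive bytes escaped
	//   "<b> & \"quoted\""                  <- only JSON-mandated escapes
}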
func NewCondition() *Condition { return &Condition{ - EastAsianWidth: EastAsianWidth, - ZeroWidthJoiner: ZeroWidthJoiner, + EastAsianWidth: EastAsianWidth, + StrictEmojiNeutral: StrictEmojiNeutral, } } // RuneWidth returns the number of cells in r. // See http://www.unicode.org/reports/tr11/ func (c *Condition) RuneWidth(r rune) int { - switch { - case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned): - return 0 - case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth): - return 2 - default: - return 1 - } -} - -func (c *Condition) stringWidth(s string) (width int) { - for _, r := range []rune(s) { - width += c.RuneWidth(r) - } - return width -} - -func (c *Condition) stringWidthZeroJoiner(s string) (width int) { - r1, r2 := rune(0), rune(0) - for _, r := range []rune(s) { - if r == 0xFE0E || r == 0xFE0F { - continue + // optimized version, verified by TestRuneWidthChecksums() + if !c.EastAsianWidth { + switch { + case r < 0x20 || r > 0x10FFFF: + return 0 + case (r >= 0x7F && r <= 0x9F) || r == 0xAD: // nonprint + return 0 + case r < 0x300: + return 1 + case inTable(r, narrow): + return 1 + case inTables(r, nonprint, combining): + return 0 + case inTable(r, doublewidth): + return 2 + default: + return 1 } - w := c.RuneWidth(r) - if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) { - if width < w { - width = w - } - } else { - width += w + } else { + switch { + case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining): + return 0 + case inTable(r, narrow): + return 1 + case inTables(r, ambiguous, doublewidth): + return 2 + case !c.StrictEmojiNeutral && inTables(r, ambiguous, emoji, narrow): + return 2 + default: + return 1 } - r1, r2 = r2, r } - return width } // StringWidth return width as you can see func (c *Condition) StringWidth(s string) (width int) { - if c.ZeroWidthJoiner { - return c.stringWidthZeroJoiner(s) + g := uniseg.NewGraphemes(s) + for g.Next() { + var chWidth int + for _, r := range g.Runes() { + chWidth = c.RuneWidth(r) + if chWidth > 0 { + break // Our best guess at this point is to use the width of the first non-zero-width rune. + } + } + width += chWidth } - return c.stringWidth(s) + return } // Truncate return string truncated with w cells @@ -149,19 +159,25 @@ func (c *Condition) Truncate(s string, w int, tail string) string { if c.StringWidth(s) <= w { return s } - r := []rune(s) - tw := c.StringWidth(tail) - w -= tw - width := 0 - i := 0 - for ; i < len(r); i++ { - cw := c.RuneWidth(r[i]) - if width+cw > w { + w -= c.StringWidth(tail) + var width int + pos := len(s) + g := uniseg.NewGraphemes(s) + for g.Next() { + var chWidth int + for _, r := range g.Runes() { + chWidth = c.RuneWidth(r) + if chWidth > 0 { + break // See StringWidth() for details. 
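From the caller's side, the move to uniseg-based grapheme clusters looks like this; a minimal sketch assuming a non-CJK condition, with illustrative results in the comments:

package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	c := &runewidth.Condition{EastAsianWidth: false, StrictEmojiNeutral: true}

	// One grapheme cluster (woman + ZWJ + boy): counted once, using the
	// width of its first non-zero-width rune.
	family := "👩" + "\u200D" + "👦"
	fmt.Println(c.StringWidth(family)) // 2

	fmt.Println(c.StringWidth("こんにちは"))        // 10
	fmt.Println(c.Truncate("こんにちは", 7, "...")) // こん...
}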
+ } + } + if width+chWidth > w { + pos, _ = g.Positions() break } - width += cw + width += chWidth } - return string(r[0:i]) + tail + return s[:pos] + tail } // Wrap return string wrapped with w cells @@ -169,7 +185,7 @@ func (c *Condition) Wrap(s string, w int) string { width := 0 out := "" for _, r := range []rune(s) { - cw := RuneWidth(r) + cw := c.RuneWidth(r) if r == '\n' { out += string(r) width = 0 diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go index b27d77d..e5d890c 100644 --- a/vendor/github.com/mattn/go-runewidth/runewidth_table.go +++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go @@ -124,8 +124,10 @@ var ambiguous = table{ {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, } -var notassigned = table{ - {0x27E6, 0x27ED}, {0x2985, 0x2986}, +var narrow = table{ + {0x0020, 0x007E}, {0x00A2, 0x00A3}, {0x00A5, 0x00A6}, + {0x00AC, 0x00AC}, {0x00AF, 0x00AF}, {0x27E6, 0x27ED}, + {0x2985, 0x2986}, } var neutral = table{ diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md index 28ce45a..feb0c24 100644 --- a/vendor/github.com/mitchellh/hashstructure/README.md +++ b/vendor/github.com/mitchellh/hashstructure/README.md @@ -17,12 +17,14 @@ sending data across the network, caching values locally (de-dup), and so on. doesn't affect the hash code but the field itself is still taken into account to create the hash value. - * Optionally specify a custom hash function to optimize for speed, collision + * Optionally, specify a custom hash function to optimize for speed, collision avoidance for your data set, etc. - - * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + + * Optionally, hash the output of `.String()` on structs that implement fmt.Stringer, allowing effective hashing of time.Time + * Optionally, override the hashing process by implementing `Hashable`. + ## Installation Standard `go get`: diff --git a/vendor/github.com/mitchellh/hashstructure/go.mod b/vendor/github.com/mitchellh/hashstructure/go.mod index 966582a..981e501 100644 --- a/vendor/github.com/mitchellh/hashstructure/go.mod +++ b/vendor/github.com/mitchellh/hashstructure/go.mod @@ -1 +1,3 @@ module github.com/mitchellh/hashstructure + +go 1.14 diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go index ea13a15..89dd4d3 100644 --- a/vendor/github.com/mitchellh/hashstructure/hashstructure.go +++ b/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -6,6 +6,7 @@ import ( "hash" "hash/fnv" "reflect" + "time" ) // ErrNotStringer is returned when there's an error with hash:"string" @@ -31,6 +32,21 @@ type HashOptions struct { // ZeroNil is flag determining if nil pointer should be treated equal // to a zero value of pointed type. By default this is false. ZeroNil bool + + // IgnoreZeroValue is determining if zero value fields should be + // ignored for hash calculation. + IgnoreZeroValue bool + + // SlicesAsSets assumes that a `set` tag is always present for slices. + // Default is false (in which case the tag is used instead) + SlicesAsSets bool + + // UseStringer will attempt to use fmt.Stringer aways. If the struct + // doesn't implement fmt.Stringer, it'll fall back to trying usual tricks. 
+ // If this is true, and the "string" tag is also set, the tag takes + // precedense (meaning that if the type doesn't implement fmt.Stringer, we + // panic) + UseStringer bool } // Hash returns the hash value of an arbitrary value. @@ -82,17 +98,23 @@ func Hash(v interface{}, opts *HashOptions) (uint64, error) { // Create our walker and walk the structure w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - zeronil: opts.ZeroNil, + h: opts.Hasher, + tag: opts.TagName, + zeronil: opts.ZeroNil, + ignorezerovalue: opts.IgnoreZeroValue, + sets: opts.SlicesAsSets, + stringer: opts.UseStringer, } return w.visit(reflect.ValueOf(v), nil) } type walker struct { - h hash.Hash64 - tag string - zeronil bool + h hash.Hash64 + tag string + zeronil bool + ignorezerovalue bool + sets bool + stringer bool } type visitOpts struct { @@ -104,6 +126,8 @@ type visitOpts struct { StructField string } +var timeType = reflect.TypeOf(time.Time{}) + func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { t := reflect.TypeOf(0) @@ -159,6 +183,18 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { return w.h.Sum64(), err } + switch v.Type() { + case timeType: + w.h.Reset() + b, err := v.Interface().(time.Time).MarshalBinary() + if err != nil { + return 0, err + } + + err = binary.Write(w.h, binary.LittleEndian, b) + return w.h.Sum64(), err + } + switch k { case reflect.Array: var h uint64 @@ -220,6 +256,24 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { include = impl } + if impl, ok := parent.(Hashable); ok { + return impl.Hash() + } + + // If we can address this value, check if the pointer value + // implements our interfaces and use that if so. + if v.CanAddr() { + vptr := v.Addr() + parentptr := vptr.Interface() + if impl, ok := parentptr.(Includable); ok { + include = impl + } + + if impl, ok := parentptr.(Hashable); ok { + return impl.Hash() + } + } + t := v.Type() h, err := w.visit(reflect.ValueOf(t.Name()), nil) if err != nil { @@ -229,6 +283,7 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { l := v.NumField() for i := 0; i < l; i++ { if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + var f visitFlag fieldType := t.Field(i) if fieldType.PkgPath != "" { @@ -242,11 +297,20 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { continue } + if w.ignorezerovalue { + zeroVal := reflect.Zero(reflect.TypeOf(innerV.Interface())).Interface() + if innerV.Interface() == zeroVal { + continue + } + } + // if string is set, use the string value - if tag == "string" { + if tag == "string" || w.stringer { if impl, ok := innerV.Interface().(fmt.Stringer); ok { innerV = reflect.ValueOf(impl.String()) - } else { + } else if tag == "string" { + // We only show this error if the tag explicitly + // requests a stringer. 
return 0, &ErrNotStringer{ Field: v.Type().Field(i).Name, } @@ -306,7 +370,7 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { return 0, err } - if set { + if set || w.sets { h = hashUpdateUnordered(h, current) } else { h = hashUpdateOrdered(w.h, h, current) diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go index b6289c0..702d354 100644 --- a/vendor/github.com/mitchellh/hashstructure/include.go +++ b/vendor/github.com/mitchellh/hashstructure/include.go @@ -13,3 +13,10 @@ type Includable interface { type IncludableMap interface { HashIncludeMap(field string, k, v interface{}) (bool, error) } + +// Hashable is an interface that can optionally be implemented by a struct +// to override the hash value. This value will override the hash value for +// the entire struct. Entries in the struct will not be hashed. +type Hashable interface { + Hash() (uint64, error) +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml deleted file mode 100644 index 5e31a95..0000000 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - "1.14.x" - - tip - -script: - - go test - - go test -bench . -benchmem diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md index 20eea2b..1955f28 100644 --- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md +++ b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -1,3 +1,15 @@ +## unreleased + +* Fix regression where `*time.Time` value would be set to empty and not be sent + to decode hooks properly [GH-232] + +## 1.4.0 + +* A new decode hook type `DecodeHookFuncValue` has been added that has + access to the full values. [GH-183] +* Squash is now supported with embedded fields that are struct pointers [GH-205] +* Empty strings will convert to 0 for all numeric types when weakly decoding [GH-206] + ## 1.3.3 * Decoding maps from maps creates a settable value for decode hooks [GH-203] diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go index 1f0abc6..92e6f76 100644 --- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -1,6 +1,7 @@ package mapstructure import ( + "encoding" "errors" "fmt" "net" @@ -16,10 +17,11 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // Create variables here so we can reference them with the reflect pkg var f1 DecodeHookFuncType var f2 DecodeHookFuncKind + var f3 DecodeHookFuncValue // Fill in the variables into this interface and the rest is done // automatically using the reflect package. - potential := []interface{}{f1, f2} + potential := []interface{}{f1, f2, f3} v := reflect.ValueOf(h) vt := v.Type() @@ -38,13 +40,15 @@ func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { // that took reflect.Kind instead of reflect.Type. 
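Putting the new hashstructure knobs together, a sketch under the assumption that set semantics for slices and a pinned per-type hash are both wanted; the types here are hypothetical:

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// Session pins its hash via the new Hashable interface, so a rotating
// token never invalidates the enclosing hash.
type Session struct {
	Token string
}

func (s Session) Hash() (uint64, error) { return 1, nil }

type Config struct {
	Name    string
	Tags    []string
	Session Session
}

func main() {
	opts := &hashstructure.HashOptions{SlicesAsSets: true, IgnoreZeroValue: true}
	h1, _ := hashstructure.Hash(Config{Name: "a", Tags: []string{"x", "y"}, Session: Session{Token: "t1"}}, opts)
	h2, _ := hashstructure.Hash(Config{Name: "a", Tags: []string{"y", "x"}, Session: Session{Token: "t2"}}, opts)
	fmt.Println(h1 == h2) // true: Tags order is ignored, Session hashes to a constant
}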
func DecodeHookExec( raw DecodeHookFunc, - from reflect.Type, to reflect.Type, - data interface{}) (interface{}, error) { + from reflect.Value, to reflect.Value) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { case DecodeHookFuncType: - return f(from, to, data) + return f(from.Type(), to.Type(), from.Interface()) case DecodeHookFuncKind: - return f(from.Kind(), to.Kind(), data) + return f(from.Kind(), to.Kind(), from.Interface()) + case DecodeHookFuncValue: + return f(from, to) default: return nil, errors.New("invalid decode hook signature") } @@ -56,22 +60,16 @@ func DecodeHookExec( // The composed funcs are called in order, with the result of the // previous transformation. func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { - return func( - f reflect.Type, - t reflect.Type, - data interface{}) (interface{}, error) { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { var err error + var data interface{} + newFrom := f for _, f1 := range fs { - data, err = DecodeHookExec(f1, f, t, data) + data, err = DecodeHookExec(f1, newFrom, t) if err != nil { return nil, err } - - // Modify the from kind to be correct with the new data - f = nil - if val := reflect.ValueOf(data); val.IsValid() { - f = val.Type() - } + newFrom = reflect.ValueOf(data) } return data, nil @@ -215,3 +213,44 @@ func WeaklyTypedHook( return data, nil } + +func RecursiveStructToMapHookFunc() DecodeHookFunc { + return func(f reflect.Value, t reflect.Value) (interface{}, error) { + if f.Kind() != reflect.Struct { + return f.Interface(), nil + } + + var i interface{} = struct{}{} + if t.Type() != reflect.TypeOf(&i).Elem() { + return f.Interface(), nil + } + + m := make(map[string]interface{}) + t.Set(reflect.ValueOf(m)) + + return f.Interface(), nil + } +} + +// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies +// strings to the UnmarshalText function, when the target type +// implements the encoding.TextUnmarshaler interface +func TextUnmarshallerHookFunc() DecodeHookFuncType { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + result := reflect.New(t).Interface() + unmarshaller, ok := result.(encoding.TextUnmarshaler) + if !ok { + return data, nil + } + if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil { + return nil, err + } + return result, nil + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index f41bcc5..3643901 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -72,6 +72,17 @@ // "name": "alice", // } // +// When decoding from a struct to a map, the squash tag squashes the struct +// fields into a single map. Using the example structs from above: +// +// Friend{Person: Person{Name: "alice"}} +// +// Will be decoded into a map: +// +// map[string]interface{}{ +// "name": "alice", +// } +// // DecoderConfig has a field that changes the behavior of mapstructure // to always squash embedded structs. // @@ -161,10 +172,11 @@ import ( // data transformations. See "DecodeHook" in the DecoderConfig // struct. // -// The type should be DecodeHookFuncType or DecodeHookFuncKind. -// Either is accepted. Types are a superset of Kinds (Types can return -// Kinds) and are generally a richer thing to use, but Kinds are simpler -// if you only need those. 
+// The type must be one of DecodeHookFuncType, DecodeHookFuncKind, or +// DecodeHookFuncValue. +// Values are a superset of Types (Values can return types), and Types are a +// superset of Kinds (Types can return Kinds) and are generally a richer thing +// to use, but Kinds are simpler if you only need those. // // The reason DecodeHookFunc is multi-typed is for backwards compatibility: // we started with Kinds and then realized Types were the better solution, @@ -180,15 +192,22 @@ type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface // source and target types. type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) +// DecodeHookFuncRaw is a DecodeHookFunc which has complete access to both the source and target +// values. +type DecodeHookFuncValue func(from reflect.Value, to reflect.Value) (interface{}, error) + // DecoderConfig is the configuration that is used to create a new decoder // and allows customization of various aspects of decoding. type DecoderConfig struct { // DecodeHook, if set, will be called before any decoding and any // type conversion (if WeaklyTypedInput is on). This lets you modify - // the values before they're set down onto the resulting struct. + // the values before they're set down onto the resulting struct. The + // DecodeHook is called for every map and value in the input. This means + // that if a struct has embedded fields with squash tags the decode hook + // is called only once with all of the input data, not once for each + // embedded struct. // - // If an error is returned, the entire decode will fail with that - // error. + // If an error is returned, the entire decode will fail with that error. DecodeHook DecodeHookFunc // If ErrorUnused is true, then it is an error for there to exist @@ -409,9 +428,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e if d.config.DecodeHook != nil { // We have a DecodeHook, so let's pre-process the input. 
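The new value-based hooks enable helpers like TextUnmarshallerHookFunc above. A small sketch of how a caller might wire it up; net.IP is chosen here only because it implements encoding.TextUnmarshaler:

package main

import (
	"fmt"
	"net"

	"github.com/mitchellh/mapstructure"
)

type Config struct {
	Addr net.IP `mapstructure:"addr"`
}

func main() {
	var out Config
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Strings are routed through UnmarshalText when the target
		// type implements encoding.TextUnmarshaler.
		DecodeHook: mapstructure.TextUnmarshallerHookFunc(),
		Result:     &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"addr": "127.0.0.1"}); err != nil {
		panic(err)
	}
	fmt.Println(out.Addr) // 127.0.0.1
}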
var err error - input, err = DecodeHookExec( - d.config.DecodeHook, - inputVal.Type(), outVal.Type(), input) + input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) if err != nil { return fmt.Errorf("error decoding '%s': %s", name, err) } @@ -562,8 +579,8 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) if !converted { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -588,7 +605,12 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseInt(str, 0, val.Type().Bits()) if err == nil { val.SetInt(i) } else { @@ -604,8 +626,8 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er val.SetInt(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -640,7 +662,12 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e val.SetUint(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + i, err := strconv.ParseUint(str, 0, val.Type().Bits()) if err == nil { val.SetUint(i) } else { @@ -660,8 +687,8 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e val.SetUint(uint64(i)) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -691,8 +718,8 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e } default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -717,7 +744,12 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(0) } case dataKind == reflect.String && d.config.WeaklyTypedInput: - f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + str := dataVal.String() + if str == "" { + str = "0" + } + + f, err := strconv.ParseFloat(str, val.Type().Bits()) if err == nil { val.SetFloat(f) } else { @@ -733,8 +765,8 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) val.SetFloat(i) default: return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } return nil @@ -785,7 +817,7 @@ func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val ref for i := 0; i < dataVal.Len(); i++ { err := d.decode( - fmt.Sprintf("%s[%d]", name, i), + name+"["+strconv.Itoa(i)+"]", dataVal.Index(i).Interface(), val) if err != nil { 
return err @@ -818,7 +850,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle } for _, k := range dataVal.MapKeys() { - fieldName := fmt.Sprintf("%s[%s]", name, k) + fieldName := name + "[" + k.String() + "]" // First decode the key into the proper type currentKey := reflect.Indirect(reflect.New(valKeyType)) @@ -871,6 +903,7 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re // If Squash is set in the config, we squash the field down. squash := d.config.Squash && v.Kind() == reflect.Struct && f.Anonymous + // Determine the name of the key in the map if index := strings.Index(tagValue, ","); index != -1 { if tagValue[:index] == "-" { @@ -883,8 +916,16 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re // If "squash" is specified in the tag, we squash the field down. squash = !squash && strings.Index(tagValue[index+1:], "squash") != -1 - if squash && v.Kind() != reflect.Struct { - return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + if squash { + // When squashing, the embedded type can be a pointer to a struct. + if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct { + v = v.Elem() + } + + // The final type must be a struct + if v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } } keyName = tagValue[:index] } else if len(tagValue) > 0 { @@ -995,8 +1036,8 @@ func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) e dataVal := reflect.Indirect(reflect.ValueOf(data)) if val.Type() != dataVal.Type() { return fmt.Errorf( - "'%s' expected type '%s', got unconvertible type '%s'", - name, val.Type(), dataVal.Type()) + "'%s' expected type '%s', got unconvertible type '%s', value: '%v'", + name, val.Type(), dataVal.Type(), data) } val.Set(dataVal) return nil @@ -1062,7 +1103,7 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) } currentField := valSlice.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -1129,7 +1170,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) currentData := dataVal.Index(i).Interface() currentField := valArray.Index(i) - fieldName := fmt.Sprintf("%s[%d]", name, i) + fieldName := name + "[" + strconv.Itoa(i) + "]" if err := d.decode(fieldName, currentData, currentField); err != nil { errors = appendErrors(errors, err) } @@ -1232,10 +1273,14 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for i := 0; i < structType.NumField(); i++ { fieldType := structType.Field(i) - fieldKind := fieldType.Type.Kind() + fieldVal := structVal.Field(i) + if fieldVal.Kind() == reflect.Ptr && fieldVal.Elem().Kind() == reflect.Struct { + // Handle embedded struct pointers as embedded structs. + fieldVal = fieldVal.Elem() + } // If "squash" is specified in the tag, we squash the field down. 
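A sketch of the squash behaviour these hunks extend to embedded struct pointers, decoding from a struct into a map (hypothetical types; illustrative output in the comment):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type Base struct {
	ID string `mapstructure:"id"`
}

type Wrapper struct {
	*Base `mapstructure:",squash"` // embedded struct *pointers* now squash too
	Name  string `mapstructure:"name"`
}

func main() {
	var out map[string]interface{}
	in := Wrapper{Base: &Base{ID: "42"}, Name: "hugo"}
	if err := mapstructure.Decode(in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out) // map[id:42 name:hugo]
}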
- squash := d.config.Squash && fieldKind == reflect.Struct && fieldType.Anonymous + squash := d.config.Squash && fieldVal.Kind() == reflect.Struct && fieldType.Anonymous remain := false // We always parse the tags cause we're looking for other tags too @@ -1253,21 +1298,21 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e } if squash { - if fieldKind != reflect.Struct { + if fieldVal.Kind() != reflect.Struct { errors = appendErrors(errors, - fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind())) } else { - structs = append(structs, structVal.FieldByName(fieldType.Name)) + structs = append(structs, fieldVal) } continue } // Build our field if remain { - remainField = &field{fieldType, structVal.Field(i)} + remainField = &field{fieldType, fieldVal} } else { // Normal struct field, store it away - fields = append(fields, field{fieldType, structVal.Field(i)}) + fields = append(fields, field{fieldType, fieldVal}) } } } @@ -1326,7 +1371,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e // If the name is empty string, then we're at the root, and we // don't dot-join the fields. if name != "" { - fieldName = fmt.Sprintf("%s.%s", name, fieldName) + fieldName = name + "." + fieldName } if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { @@ -1373,7 +1418,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e for rawKey := range dataValKeysUnused { key := rawKey.(string) if name != "" { - key = fmt.Sprintf("%s.%s", name, key) + key = name + "." + key } d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) diff --git a/vendor/github.com/niklasfasching/go-org/org/block.go b/vendor/github.com/niklasfasching/go-org/org/block.go index 11b135f..9793f81 100644 --- a/vendor/github.com/niklasfasching/go-org/org/block.go +++ b/vendor/github.com/niklasfasching/go-org/org/block.go @@ -54,7 +54,7 @@ func isRawTextBlock(name string) bool { return name == "SRC" || name == "EXAMPLE func (d *Document) parseBlock(i int, parentStop stopFn) (int, Node) { t, start := d.tokens[i], i - name, parameters := t.content, strings.Fields(t.matches[3]) + name, parameters := t.content, splitParameters(t.matches[3]) trim := trimIndentUpTo(d.tokens[i].lvl) stop := func(d *Document, i int) bool { return i >= len(d.tokens) || (d.tokens[i].kind == "endBlock" && d.tokens[i].content == name) @@ -121,6 +121,19 @@ func trimIndentUpTo(max int) func(string) string { } } +func splitParameters(s string) []string { + parameters, parts := []string{}, strings.Split(s, " :") + lang, rest := strings.TrimSpace(parts[0]), parts[1:] + if lang != "" { + parameters = append(parameters, lang) + } + for _, p := range rest { + kv := strings.SplitN(p+" ", " ", 2) + parameters = append(parameters, ":"+kv[0], strings.TrimSpace(kv[1])) + } + return parameters +} + func (b Block) ParameterMap() map[string]string { if len(b.Parameters) == 0 { return nil diff --git a/vendor/github.com/niklasfasching/go-org/org/html_writer.go b/vendor/github.com/niklasfasching/go-org/org/html_writer.go index f2e75a0..9dfe7ad 100644 --- a/vendor/github.com/niklasfasching/go-org/org/html_writer.go +++ b/vendor/github.com/niklasfasching/go-org/org/html_writer.go @@ -55,6 +55,7 @@ var listItemStatuses = map[string]string{ } var cleanHeadlineTitleForHTMLAnchorRegexp = regexp.MustCompile(`]*>`) // nested a tags are not valid HTML +var 
tocHeadlineMaxLvlRegexp = regexp.MustCompile(`headlines\s+(\d+)`) func NewHTMLWriter() *HTMLWriter { defaultConfig := New() @@ -100,7 +101,10 @@ func (w *HTMLWriter) Before(d *Document) { } w.WriteString(fmt.Sprintf(`
<h1 class="title">%s</h1>
`+"\n", title)) } - w.WriteOutline(d) + if w.document.GetOption("toc") != "nil" { + maxLvl, _ := strconv.Atoi(w.document.GetOption("toc")) + w.WriteOutline(d, maxLvl) + } } func (w *HTMLWriter) After(d *Document) { @@ -168,6 +172,11 @@ func (w *HTMLWriter) WriteDrawer(d Drawer) { func (w *HTMLWriter) WriteKeyword(k Keyword) { if k.Key == "HTML" { w.WriteString(k.Value + "\n") + } else if k.Key == "TOC" { + if m := tocHeadlineMaxLvlRegexp.FindStringSubmatch(k.Value); m != nil { + maxLvl, _ := strconv.Atoi(m[1]) + w.WriteOutline(w.document, maxLvl) + } } } @@ -207,9 +216,8 @@ func (w *HTMLWriter) WriteFootnotes(d *Document) { w.WriteString("\n\n") } -func (w *HTMLWriter) WriteOutline(d *Document) { - if w.document.GetOption("toc") != "nil" && len(d.Outline.Children) != 0 { - maxLvl, _ := strconv.Atoi(w.document.GetOption("toc")) +func (w *HTMLWriter) WriteOutline(d *Document, maxLvl int) { + if len(d.Outline.Children) != 0 { w.WriteString("
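To exercise the new TOC handling end to end, a minimal sketch of rendering an Org document with go-org; the document content is hypothetical, and "#+OPTIONS: toc:nil" suppresses the automatic outline so the explicit "#+TOC: headlines 1" keyword re-inserts it in place:

package main

import (
	"fmt"
	"strings"

	"github.com/niklasfasching/go-org/org"
)

func main() {
	input := `#+OPTIONS: toc:nil
* First
** Nested
#+TOC: headlines 1
* Second
`
	doc := org.New().Parse(strings.NewReader(input), "example.org")
	html, err := doc.Write(org.NewHTMLWriter())
	if err != nil {
		panic(err)
	}
	fmt.Println(html) // outline appears where the TOC keyword sits, one level deep
}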